]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.0-3.17.2-201411091054.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.17.2-201411091054.patch
CommitLineData
aab65bf6
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 764f599..c600e2f 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 1edd5fd..107ff46 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1155,6 +1155,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2175,6 +2179,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2467,6 +2475,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
372diff --git a/Makefile b/Makefile
373index 390afde..33153b5 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377
378 HOSTCC = gcc
379 HOSTCXX = g++
380-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
381-HOSTCXXFLAGS = -O2
382+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
383+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
384+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
385
386 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
387 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
388@@ -450,8 +451,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
389 # Rules shared between *config targets and build targets
390
391 # Basic helpers built in scripts/
392-PHONY += scripts_basic
393-scripts_basic:
394+PHONY += scripts_basic gcc-plugins
395+scripts_basic: gcc-plugins
396 $(Q)$(MAKE) $(build)=scripts/basic
397 $(Q)rm -f .tmp_quiet_recordmcount
398
399@@ -625,6 +626,72 @@ endif
400 # Tell gcc to never replace conditional load with a non-conditional one
401 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
402
403+ifndef DISABLE_PAX_PLUGINS
404+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
405+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
406+else
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
408+endif
409+ifneq ($(PLUGINCC),)
410+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
411+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
412+endif
413+ifdef CONFIG_PAX_MEMORY_STACKLEAK
414+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
415+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
416+endif
417+ifdef CONFIG_KALLOCSTAT_PLUGIN
418+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
419+endif
420+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
421+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
422+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
424+endif
425+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
426+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
428+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
429+endif
430+endif
431+ifdef CONFIG_CHECKER_PLUGIN
432+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
433+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
434+endif
435+endif
436+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
437+ifdef CONFIG_PAX_SIZE_OVERFLOW
438+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
439+endif
440+ifdef CONFIG_PAX_LATENT_ENTROPY
441+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
442+endif
443+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
444+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
445+endif
446+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
451+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
452+ifeq ($(KBUILD_EXTMOD),)
453+gcc-plugins:
454+ $(Q)$(MAKE) $(build)=tools/gcc
455+else
456+gcc-plugins: ;
457+endif
458+else
459+gcc-plugins:
460+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
461+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
462+else
463+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
464+endif
465+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
466+endif
467+endif
468+
469 ifdef CONFIG_READABLE_ASM
470 # Disable optimizations that make assembler listings hard to read.
471 # reorder blocks reorders the control in the function
472@@ -717,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
473 else
474 KBUILD_CFLAGS += -g
475 endif
476-KBUILD_AFLAGS += -Wa,-gdwarf-2
477+KBUILD_AFLAGS += -Wa,--gdwarf-2
478 endif
479 ifdef CONFIG_DEBUG_INFO_DWARF4
480 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
481@@ -867,7 +934,7 @@ export mod_sign_cmd
482
483
484 ifeq ($(KBUILD_EXTMOD),)
485-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
486+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
487
488 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
489 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
490@@ -916,6 +983,8 @@ endif
491
492 # The actual objects are generated when descending,
493 # make sure no implicit rule kicks in
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
495+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
496 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
497
498 # Handle descending into subdirectories listed in $(vmlinux-dirs)
499@@ -925,7 +994,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
500 # Error messages still appears in the original language
501
502 PHONY += $(vmlinux-dirs)
503-$(vmlinux-dirs): prepare scripts
504+$(vmlinux-dirs): gcc-plugins prepare scripts
505 $(Q)$(MAKE) $(build)=$@
506
507 define filechk_kernel.release
508@@ -968,10 +1037,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
509
510 archprepare: archheaders archscripts prepare1 scripts_basic
511
512+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514 prepare0: archprepare FORCE
515 $(Q)$(MAKE) $(build)=.
516
517 # All the preparing..
518+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
519 prepare: prepare0
520
521 # Generate some files
522@@ -1086,6 +1158,8 @@ all: modules
523 # using awk while concatenating to the final file.
524
525 PHONY += modules
526+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
527+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
528 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
529 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
530 @$(kecho) ' Building modules, stage 2.';
531@@ -1101,7 +1175,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
532
533 # Target to prepare building external modules
534 PHONY += modules_prepare
535-modules_prepare: prepare scripts
536+modules_prepare: gcc-plugins prepare scripts
537
538 # Target to install modules
539 PHONY += modules_install
540@@ -1167,7 +1241,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
541 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
542 signing_key.priv signing_key.x509 x509.genkey \
543 extra_certificates signing_key.x509.keyid \
544- signing_key.x509.signer include/linux/version.h
545+ signing_key.x509.signer include/linux/version.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
547+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
548+ tools/gcc/randomize_layout_seed.h
549
550 # clean - Delete most, but leave enough to build external modules
551 #
552@@ -1206,7 +1283,7 @@ distclean: mrproper
553 @find $(srctree) $(RCS_FIND_IGNORE) \
554 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
555 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
556- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
557+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
558 -type f -print | xargs rm -f
559
560
561@@ -1372,6 +1449,8 @@ PHONY += $(module-dirs) modules
562 $(module-dirs): crmodverdir $(objtree)/Module.symvers
563 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
564
565+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
566+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
567 modules: $(module-dirs)
568 @$(kecho) ' Building modules, stage 2.';
569 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
570@@ -1512,17 +1591,21 @@ else
571 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
572 endif
573
574-%.s: %.c prepare scripts FORCE
575+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
576+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
577+%.s: %.c gcc-plugins prepare scripts FORCE
578 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
579 %.i: %.c prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581-%.o: %.c prepare scripts FORCE
582+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
583+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
584+%.o: %.c gcc-plugins prepare scripts FORCE
585 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
586 %.lst: %.c prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588-%.s: %.S prepare scripts FORCE
589+%.s: %.S gcc-plugins prepare scripts FORCE
590 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
591-%.o: %.S prepare scripts FORCE
592+%.o: %.S gcc-plugins prepare scripts FORCE
593 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
594 %.symtypes: %.c prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596@@ -1532,11 +1615,15 @@ endif
597 $(cmd_crmodverdir)
598 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
599 $(build)=$(build-dir)
600-%/: prepare scripts FORCE
601+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
602+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
603+%/: gcc-plugins prepare scripts FORCE
604 $(cmd_crmodverdir)
605 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
606 $(build)=$(build-dir)
607-%.ko: prepare scripts FORCE
608+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
609+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
610+%.ko: gcc-plugins prepare scripts FORCE
611 $(cmd_crmodverdir)
612 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
613 $(build)=$(build-dir) $(@:.ko=.o)
614diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
615index ed60a1e..47f1a55 100644
616--- a/arch/alpha/include/asm/atomic.h
617+++ b/arch/alpha/include/asm/atomic.h
618@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
619 #define atomic_dec(v) atomic_sub(1,(v))
620 #define atomic64_dec(v) atomic64_sub(1,(v))
621
622+#define atomic64_read_unchecked(v) atomic64_read(v)
623+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
624+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
625+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
626+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
627+#define atomic64_inc_unchecked(v) atomic64_inc(v)
628+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
629+#define atomic64_dec_unchecked(v) atomic64_dec(v)
630+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
631+
632 #endif /* _ALPHA_ATOMIC_H */
633diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
634index ad368a9..fbe0f25 100644
635--- a/arch/alpha/include/asm/cache.h
636+++ b/arch/alpha/include/asm/cache.h
637@@ -4,19 +4,19 @@
638 #ifndef __ARCH_ALPHA_CACHE_H
639 #define __ARCH_ALPHA_CACHE_H
640
641+#include <linux/const.h>
642
643 /* Bytes per L1 (data) cache line. */
644 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
645-# define L1_CACHE_BYTES 64
646 # define L1_CACHE_SHIFT 6
647 #else
648 /* Both EV4 and EV5 are write-through, read-allocate,
649 direct-mapped, physical.
650 */
651-# define L1_CACHE_BYTES 32
652 # define L1_CACHE_SHIFT 5
653 #endif
654
655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
656 #define SMP_CACHE_BYTES L1_CACHE_BYTES
657
658 #endif
659diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
660index 968d999..d36b2df 100644
661--- a/arch/alpha/include/asm/elf.h
662+++ b/arch/alpha/include/asm/elf.h
663@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
664
665 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
666
667+#ifdef CONFIG_PAX_ASLR
668+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
669+
670+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
671+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
672+#endif
673+
674 /* $0 is set by ld.so to a pointer to a function which might be
675 registered using atexit. This provides a mean for the dynamic
676 linker to call DT_FINI functions for shared libraries that have
677diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
678index aab14a0..b4fa3e7 100644
679--- a/arch/alpha/include/asm/pgalloc.h
680+++ b/arch/alpha/include/asm/pgalloc.h
681@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
682 pgd_set(pgd, pmd);
683 }
684
685+static inline void
686+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
687+{
688+ pgd_populate(mm, pgd, pmd);
689+}
690+
691 extern pgd_t *pgd_alloc(struct mm_struct *mm);
692
693 static inline void
694diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
695index d8f9b7e..f6222fa 100644
696--- a/arch/alpha/include/asm/pgtable.h
697+++ b/arch/alpha/include/asm/pgtable.h
698@@ -102,6 +102,17 @@ struct vm_area_struct;
699 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
700 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
702+
703+#ifdef CONFIG_PAX_PAGEEXEC
704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
707+#else
708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
709+# define PAGE_COPY_NOEXEC PAGE_COPY
710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
711+#endif
712+
713 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
714
715 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
716diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
717index 2fd00b7..cfd5069 100644
718--- a/arch/alpha/kernel/module.c
719+++ b/arch/alpha/kernel/module.c
720@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
721
722 /* The small sections were sorted to the end of the segment.
723 The following should definitely cover them. */
724- gp = (u64)me->module_core + me->core_size - 0x8000;
725+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
726 got = sechdrs[me->arch.gotsecindex].sh_addr;
727
728 for (i = 0; i < n; i++) {
729diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
730index 1402fcc..0b1abd2 100644
731--- a/arch/alpha/kernel/osf_sys.c
732+++ b/arch/alpha/kernel/osf_sys.c
733@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
734 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
735
736 static unsigned long
737-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
738- unsigned long limit)
739+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
740+ unsigned long limit, unsigned long flags)
741 {
742 struct vm_unmapped_area_info info;
743+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
744
745 info.flags = 0;
746 info.length = len;
747@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
748 info.high_limit = limit;
749 info.align_mask = 0;
750 info.align_offset = 0;
751+ info.threadstack_offset = offset;
752 return vm_unmapped_area(&info);
753 }
754
755@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
756 merely specific addresses, but regions of memory -- perhaps
757 this feature should be incorporated into all ports? */
758
759+#ifdef CONFIG_PAX_RANDMMAP
760+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
761+#endif
762+
763 if (addr) {
764- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
765+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
766 if (addr != (unsigned long) -ENOMEM)
767 return addr;
768 }
769
770 /* Next, try allocating at TASK_UNMAPPED_BASE. */
771- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
772- len, limit);
773+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
774+
775 if (addr != (unsigned long) -ENOMEM)
776 return addr;
777
778 /* Finally, try allocating in low memory. */
779- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
780+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
781
782 return addr;
783 }
784diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
785index 98838a0..b304fb4 100644
786--- a/arch/alpha/mm/fault.c
787+++ b/arch/alpha/mm/fault.c
788@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
789 __reload_thread(pcb);
790 }
791
792+#ifdef CONFIG_PAX_PAGEEXEC
793+/*
794+ * PaX: decide what to do with offenders (regs->pc = fault address)
795+ *
796+ * returns 1 when task should be killed
797+ * 2 when patched PLT trampoline was detected
798+ * 3 when unpatched PLT trampoline was detected
799+ */
800+static int pax_handle_fetch_fault(struct pt_regs *regs)
801+{
802+
803+#ifdef CONFIG_PAX_EMUPLT
804+ int err;
805+
806+ do { /* PaX: patched PLT emulation #1 */
807+ unsigned int ldah, ldq, jmp;
808+
809+ err = get_user(ldah, (unsigned int *)regs->pc);
810+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
811+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
812+
813+ if (err)
814+ break;
815+
816+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
817+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
818+ jmp == 0x6BFB0000U)
819+ {
820+ unsigned long r27, addr;
821+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
822+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
823+
824+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
825+ err = get_user(r27, (unsigned long *)addr);
826+ if (err)
827+ break;
828+
829+ regs->r27 = r27;
830+ regs->pc = r27;
831+ return 2;
832+ }
833+ } while (0);
834+
835+ do { /* PaX: patched PLT emulation #2 */
836+ unsigned int ldah, lda, br;
837+
838+ err = get_user(ldah, (unsigned int *)regs->pc);
839+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
840+ err |= get_user(br, (unsigned int *)(regs->pc+8));
841+
842+ if (err)
843+ break;
844+
845+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
846+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
847+ (br & 0xFFE00000U) == 0xC3E00000U)
848+ {
849+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
850+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
851+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
852+
853+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
854+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
855+ return 2;
856+ }
857+ } while (0);
858+
859+ do { /* PaX: unpatched PLT emulation */
860+ unsigned int br;
861+
862+ err = get_user(br, (unsigned int *)regs->pc);
863+
864+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
865+ unsigned int br2, ldq, nop, jmp;
866+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
867+
868+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
869+ err = get_user(br2, (unsigned int *)addr);
870+ err |= get_user(ldq, (unsigned int *)(addr+4));
871+ err |= get_user(nop, (unsigned int *)(addr+8));
872+ err |= get_user(jmp, (unsigned int *)(addr+12));
873+ err |= get_user(resolver, (unsigned long *)(addr+16));
874+
875+ if (err)
876+ break;
877+
878+ if (br2 == 0xC3600000U &&
879+ ldq == 0xA77B000CU &&
880+ nop == 0x47FF041FU &&
881+ jmp == 0x6B7B0000U)
882+ {
883+ regs->r28 = regs->pc+4;
884+ regs->r27 = addr+16;
885+ regs->pc = resolver;
886+ return 3;
887+ }
888+ }
889+ } while (0);
890+#endif
891+
892+ return 1;
893+}
894+
895+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
896+{
897+ unsigned long i;
898+
899+ printk(KERN_ERR "PAX: bytes at PC: ");
900+ for (i = 0; i < 5; i++) {
901+ unsigned int c;
902+ if (get_user(c, (unsigned int *)pc+i))
903+ printk(KERN_CONT "???????? ");
904+ else
905+ printk(KERN_CONT "%08x ", c);
906+ }
907+ printk("\n");
908+}
909+#endif
910
911 /*
912 * This routine handles page faults. It determines the address,
913@@ -133,8 +251,29 @@ retry:
914 good_area:
915 si_code = SEGV_ACCERR;
916 if (cause < 0) {
917- if (!(vma->vm_flags & VM_EXEC))
918+ if (!(vma->vm_flags & VM_EXEC)) {
919+
920+#ifdef CONFIG_PAX_PAGEEXEC
921+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
922+ goto bad_area;
923+
924+ up_read(&mm->mmap_sem);
925+ switch (pax_handle_fetch_fault(regs)) {
926+
927+#ifdef CONFIG_PAX_EMUPLT
928+ case 2:
929+ case 3:
930+ return;
931+#endif
932+
933+ }
934+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
935+ do_group_exit(SIGKILL);
936+#else
937 goto bad_area;
938+#endif
939+
940+ }
941 } else if (!cause) {
942 /* Allow reads even for write-only mappings */
943 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
944diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
945index a2ff5c5..ecf6a78 100644
946--- a/arch/arc/kernel/kgdb.c
947+++ b/arch/arc/kernel/kgdb.c
948@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
949 return -1;
950 }
951
952-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
953-{
954- return instruction_pointer(regs);
955-}
956-
957 int kgdb_arch_init(void)
958 {
959 single_step_data.armed = 0;
960diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
961index 32cbbd5..c102df9 100644
962--- a/arch/arm/Kconfig
963+++ b/arch/arm/Kconfig
964@@ -1719,7 +1719,7 @@ config ALIGNMENT_TRAP
965
966 config UACCESS_WITH_MEMCPY
967 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
968- depends on MMU
969+ depends on MMU && !PAX_MEMORY_UDEREF
970 default y if CPU_FEROCEON
971 help
972 Implement faster copy_to_user and clear_user methods for CPU
973@@ -1983,6 +1983,7 @@ config XIP_PHYS_ADDR
974 config KEXEC
975 bool "Kexec system call (EXPERIMENTAL)"
976 depends on (!SMP || PM_SLEEP_SMP)
977+ depends on !GRKERNSEC_KMEM
978 help
979 kexec is a system call that implements the ability to shutdown your
980 current kernel, and to start another kernel. It is like a reboot
981diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
982index 3040359..a494fa3 100644
983--- a/arch/arm/include/asm/atomic.h
984+++ b/arch/arm/include/asm/atomic.h
985@@ -18,17 +18,41 @@
986 #include <asm/barrier.h>
987 #include <asm/cmpxchg.h>
988
989+#ifdef CONFIG_GENERIC_ATOMIC64
990+#include <asm-generic/atomic64.h>
991+#endif
992+
993 #define ATOMIC_INIT(i) { (i) }
994
995 #ifdef __KERNEL__
996
997+#ifdef CONFIG_THUMB2_KERNEL
998+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
999+#else
1000+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1001+#endif
1002+
1003+#define _ASM_EXTABLE(from, to) \
1004+" .pushsection __ex_table,\"a\"\n"\
1005+" .align 3\n" \
1006+" .long " #from ", " #to"\n" \
1007+" .popsection"
1008+
1009 /*
1010 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1011 * strex/ldrex monitor on some implementations. The reason we can use it for
1012 * atomic_set() is the clrex or dummy strex done on every exception return.
1013 */
1014 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1015+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1016+{
1017+ return v->counter;
1018+}
1019 #define atomic_set(v,i) (((v)->counter) = (i))
1020+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1021+{
1022+ v->counter = i;
1023+}
1024
1025 #if __LINUX_ARM_ARCH__ >= 6
1026
1027@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
1028
1029 prefetchw(&v->counter);
1030 __asm__ __volatile__("@ atomic_add\n"
1031+"1: ldrex %1, [%3]\n"
1032+" adds %0, %1, %4\n"
1033+
1034+#ifdef CONFIG_PAX_REFCOUNT
1035+" bvc 3f\n"
1036+"2: " REFCOUNT_TRAP_INSN "\n"
1037+"3:\n"
1038+#endif
1039+
1040+" strex %1, %0, [%3]\n"
1041+" teq %1, #0\n"
1042+" bne 1b"
1043+
1044+#ifdef CONFIG_PAX_REFCOUNT
1045+"\n4:\n"
1046+ _ASM_EXTABLE(2b, 4b)
1047+#endif
1048+
1049+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1050+ : "r" (&v->counter), "Ir" (i)
1051+ : "cc");
1052+}
1053+
1054+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1055+{
1056+ unsigned long tmp;
1057+ int result;
1058+
1059+ prefetchw(&v->counter);
1060+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1061 "1: ldrex %0, [%3]\n"
1062 " add %0, %0, %4\n"
1063 " strex %1, %0, [%3]\n"
1064@@ -63,6 +117,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1065 prefetchw(&v->counter);
1066
1067 __asm__ __volatile__("@ atomic_add_return\n"
1068+"1: ldrex %1, [%3]\n"
1069+" adds %0, %1, %4\n"
1070+
1071+#ifdef CONFIG_PAX_REFCOUNT
1072+" bvc 3f\n"
1073+" mov %0, %1\n"
1074+"2: " REFCOUNT_TRAP_INSN "\n"
1075+"3:\n"
1076+#endif
1077+
1078+" strex %1, %0, [%3]\n"
1079+" teq %1, #0\n"
1080+" bne 1b"
1081+
1082+#ifdef CONFIG_PAX_REFCOUNT
1083+"\n4:\n"
1084+ _ASM_EXTABLE(2b, 4b)
1085+#endif
1086+
1087+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1088+ : "r" (&v->counter), "Ir" (i)
1089+ : "cc");
1090+
1091+ smp_mb();
1092+
1093+ return result;
1094+}
1095+
1096+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1097+{
1098+ unsigned long tmp;
1099+ int result;
1100+
1101+ smp_mb();
1102+ prefetchw(&v->counter);
1103+
1104+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1105 "1: ldrex %0, [%3]\n"
1106 " add %0, %0, %4\n"
1107 " strex %1, %0, [%3]\n"
1108@@ -84,6 +175,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1109
1110 prefetchw(&v->counter);
1111 __asm__ __volatile__("@ atomic_sub\n"
1112+"1: ldrex %1, [%3]\n"
1113+" subs %0, %1, %4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: " REFCOUNT_TRAP_INSN "\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strex %1, %0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "Ir" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1136+{
1137+ unsigned long tmp;
1138+ int result;
1139+
1140+ prefetchw(&v->counter);
1141+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1142 "1: ldrex %0, [%3]\n"
1143 " sub %0, %0, %4\n"
1144 " strex %1, %0, [%3]\n"
1145@@ -103,11 +224,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1146 prefetchw(&v->counter);
1147
1148 __asm__ __volatile__("@ atomic_sub_return\n"
1149-"1: ldrex %0, [%3]\n"
1150-" sub %0, %0, %4\n"
1151+"1: ldrex %1, [%3]\n"
1152+" subs %0, %1, %4\n"
1153+
1154+#ifdef CONFIG_PAX_REFCOUNT
1155+" bvc 3f\n"
1156+" mov %0, %1\n"
1157+"2: " REFCOUNT_TRAP_INSN "\n"
1158+"3:\n"
1159+#endif
1160+
1161 " strex %1, %0, [%3]\n"
1162 " teq %1, #0\n"
1163 " bne 1b"
1164+
1165+#ifdef CONFIG_PAX_REFCOUNT
1166+"\n4:\n"
1167+ _ASM_EXTABLE(2b, 4b)
1168+#endif
1169+
1170 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1171 : "r" (&v->counter), "Ir" (i)
1172 : "cc");
1173@@ -152,12 +287,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1174 __asm__ __volatile__ ("@ atomic_add_unless\n"
1175 "1: ldrex %0, [%4]\n"
1176 " teq %0, %5\n"
1177-" beq 2f\n"
1178-" add %1, %0, %6\n"
1179+" beq 4f\n"
1180+" adds %1, %0, %6\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: " REFCOUNT_TRAP_INSN "\n"
1185+"3:\n"
1186+#endif
1187+
1188 " strex %2, %1, [%4]\n"
1189 " teq %2, #0\n"
1190 " bne 1b\n"
1191-"2:"
1192+"4:"
1193+
1194+#ifdef CONFIG_PAX_REFCOUNT
1195+ _ASM_EXTABLE(2b, 4b)
1196+#endif
1197+
1198 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1199 : "r" (&v->counter), "r" (u), "r" (a)
1200 : "cc");
1201@@ -168,6 +315,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1202 return oldval;
1203 }
1204
1205+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1206+{
1207+ unsigned long oldval, res;
1208+
1209+ smp_mb();
1210+
1211+ do {
1212+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1213+ "ldrex %1, [%3]\n"
1214+ "mov %0, #0\n"
1215+ "teq %1, %4\n"
1216+ "strexeq %0, %5, [%3]\n"
1217+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1218+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1219+ : "cc");
1220+ } while (res);
1221+
1222+ smp_mb();
1223+
1224+ return oldval;
1225+}
1226+
1227 #else /* ARM_ARCH_6 */
1228
1229 #ifdef CONFIG_SMP
1230@@ -186,7 +355,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1231
1232 return val;
1233 }
1234+
1235+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1236+{
1237+ return atomic_add_return(i, v);
1238+}
1239+
1240 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1241+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1242+{
1243+ (void) atomic_add_return(i, v);
1244+}
1245
1246 static inline int atomic_sub_return(int i, atomic_t *v)
1247 {
1248@@ -201,6 +380,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1249 return val;
1250 }
1251 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1252+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1253+{
1254+ (void) atomic_sub_return(i, v);
1255+}
1256
1257 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1258 {
1259@@ -216,6 +399,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1260 return ret;
1261 }
1262
1263+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1264+{
1265+ return atomic_cmpxchg(v, old, new);
1266+}
1267+
1268 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1269 {
1270 int c, old;
1271@@ -229,13 +417,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1272 #endif /* __LINUX_ARM_ARCH__ */
1273
1274 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1275+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1276+{
1277+ return xchg(&v->counter, new);
1278+}
1279
1280 #define atomic_inc(v) atomic_add(1, v)
1281+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1282+{
1283+ atomic_add_unchecked(1, v);
1284+}
1285 #define atomic_dec(v) atomic_sub(1, v)
1286+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1287+{
1288+ atomic_sub_unchecked(1, v);
1289+}
1290
1291 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1292+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1293+{
1294+ return atomic_add_return_unchecked(1, v) == 0;
1295+}
1296 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1297 #define atomic_inc_return(v) (atomic_add_return(1, v))
1298+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1299+{
1300+ return atomic_add_return_unchecked(1, v);
1301+}
1302 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1303 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1304
1305@@ -246,6 +454,14 @@ typedef struct {
1306 long long counter;
1307 } atomic64_t;
1308
1309+#ifdef CONFIG_PAX_REFCOUNT
1310+typedef struct {
1311+ long long counter;
1312+} atomic64_unchecked_t;
1313+#else
1314+typedef atomic64_t atomic64_unchecked_t;
1315+#endif
1316+
1317 #define ATOMIC64_INIT(i) { (i) }
1318
1319 #ifdef CONFIG_ARM_LPAE
1320@@ -262,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1321 return result;
1322 }
1323
1324+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1325+{
1326+ long long result;
1327+
1328+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1329+" ldrd %0, %H0, [%1]"
1330+ : "=&r" (result)
1331+ : "r" (&v->counter), "Qo" (v->counter)
1332+ );
1333+
1334+ return result;
1335+}
1336+
1337 static inline void atomic64_set(atomic64_t *v, long long i)
1338 {
1339 __asm__ __volatile__("@ atomic64_set\n"
1340@@ -270,6 +499,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1341 : "r" (&v->counter), "r" (i)
1342 );
1343 }
1344+
1345+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1346+{
1347+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1348+" strd %2, %H2, [%1]"
1349+ : "=Qo" (v->counter)
1350+ : "r" (&v->counter), "r" (i)
1351+ );
1352+}
1353 #else
1354 static inline long long atomic64_read(const atomic64_t *v)
1355 {
1356@@ -284,6 +522,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1357 return result;
1358 }
1359
1360+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1361+{
1362+ long long result;
1363+
1364+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1365+" ldrexd %0, %H0, [%1]"
1366+ : "=&r" (result)
1367+ : "r" (&v->counter), "Qo" (v->counter)
1368+ );
1369+
1370+ return result;
1371+}
1372+
1373 static inline void atomic64_set(atomic64_t *v, long long i)
1374 {
1375 long long tmp;
1376@@ -298,6 +549,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1377 : "r" (&v->counter), "r" (i)
1378 : "cc");
1379 }
1380+
1381+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1382+{
1383+ long long tmp;
1384+
1385+ prefetchw(&v->counter);
1386+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1387+"1: ldrexd %0, %H0, [%2]\n"
1388+" strexd %0, %3, %H3, [%2]\n"
1389+" teq %0, #0\n"
1390+" bne 1b"
1391+ : "=&r" (tmp), "=Qo" (v->counter)
1392+ : "r" (&v->counter), "r" (i)
1393+ : "cc");
1394+}
1395 #endif
1396
1397 static inline void atomic64_add(long long i, atomic64_t *v)
1398@@ -309,6 +575,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1399 __asm__ __volatile__("@ atomic64_add\n"
1400 "1: ldrexd %0, %H0, [%3]\n"
1401 " adds %Q0, %Q0, %Q4\n"
1402+" adcs %R0, %R0, %R4\n"
1403+
1404+#ifdef CONFIG_PAX_REFCOUNT
1405+" bvc 3f\n"
1406+"2: " REFCOUNT_TRAP_INSN "\n"
1407+"3:\n"
1408+#endif
1409+
1410+" strexd %1, %0, %H0, [%3]\n"
1411+" teq %1, #0\n"
1412+" bne 1b"
1413+
1414+#ifdef CONFIG_PAX_REFCOUNT
1415+"\n4:\n"
1416+ _ASM_EXTABLE(2b, 4b)
1417+#endif
1418+
1419+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1420+ : "r" (&v->counter), "r" (i)
1421+ : "cc");
1422+}
1423+
1424+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1425+{
1426+ long long result;
1427+ unsigned long tmp;
1428+
1429+ prefetchw(&v->counter);
1430+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1431+"1: ldrexd %0, %H0, [%3]\n"
1432+" adds %Q0, %Q0, %Q4\n"
1433 " adc %R0, %R0, %R4\n"
1434 " strexd %1, %0, %H0, [%3]\n"
1435 " teq %1, #0\n"
1436@@ -329,6 +626,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1437 __asm__ __volatile__("@ atomic64_add_return\n"
1438 "1: ldrexd %0, %H0, [%3]\n"
1439 " adds %Q0, %Q0, %Q4\n"
1440+" adcs %R0, %R0, %R4\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: " REFCOUNT_TRAP_INSN "\n"
1447+"3:\n"
1448+#endif
1449+
1450+" strexd %1, %0, %H0, [%3]\n"
1451+" teq %1, #0\n"
1452+" bne 1b"
1453+
1454+#ifdef CONFIG_PAX_REFCOUNT
1455+"\n4:\n"
1456+ _ASM_EXTABLE(2b, 4b)
1457+#endif
1458+
1459+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1460+ : "r" (&v->counter), "r" (i)
1461+ : "cc");
1462+
1463+ smp_mb();
1464+
1465+ return result;
1466+}
1467+
1468+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1469+{
1470+ long long result;
1471+ unsigned long tmp;
1472+
1473+ smp_mb();
1474+
1475+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1476+"1: ldrexd %0, %H0, [%3]\n"
1477+" adds %Q0, %Q0, %Q4\n"
1478 " adc %R0, %R0, %R4\n"
1479 " strexd %1, %0, %H0, [%3]\n"
1480 " teq %1, #0\n"
1481@@ -351,6 +686,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1482 __asm__ __volatile__("@ atomic64_sub\n"
1483 "1: ldrexd %0, %H0, [%3]\n"
1484 " subs %Q0, %Q0, %Q4\n"
1485+" sbcs %R0, %R0, %R4\n"
1486+
1487+#ifdef CONFIG_PAX_REFCOUNT
1488+" bvc 3f\n"
1489+"2: " REFCOUNT_TRAP_INSN "\n"
1490+"3:\n"
1491+#endif
1492+
1493+" strexd %1, %0, %H0, [%3]\n"
1494+" teq %1, #0\n"
1495+" bne 1b"
1496+
1497+#ifdef CONFIG_PAX_REFCOUNT
1498+"\n4:\n"
1499+ _ASM_EXTABLE(2b, 4b)
1500+#endif
1501+
1502+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1503+ : "r" (&v->counter), "r" (i)
1504+ : "cc");
1505+}
1506+
1507+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1508+{
1509+ long long result;
1510+ unsigned long tmp;
1511+
1512+ prefetchw(&v->counter);
1513+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1514+"1: ldrexd %0, %H0, [%3]\n"
1515+" subs %Q0, %Q0, %Q4\n"
1516 " sbc %R0, %R0, %R4\n"
1517 " strexd %1, %0, %H0, [%3]\n"
1518 " teq %1, #0\n"
1519@@ -371,10 +737,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1520 __asm__ __volatile__("@ atomic64_sub_return\n"
1521 "1: ldrexd %0, %H0, [%3]\n"
1522 " subs %Q0, %Q0, %Q4\n"
1523-" sbc %R0, %R0, %R4\n"
1524+" sbcs %R0, %R0, %R4\n"
1525+
1526+#ifdef CONFIG_PAX_REFCOUNT
1527+" bvc 3f\n"
1528+" mov %0, %1\n"
1529+" mov %H0, %H1\n"
1530+"2: " REFCOUNT_TRAP_INSN "\n"
1531+"3:\n"
1532+#endif
1533+
1534 " strexd %1, %0, %H0, [%3]\n"
1535 " teq %1, #0\n"
1536 " bne 1b"
1537+
1538+#ifdef CONFIG_PAX_REFCOUNT
1539+"\n4:\n"
1540+ _ASM_EXTABLE(2b, 4b)
1541+#endif
1542+
1543 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1544 : "r" (&v->counter), "r" (i)
1545 : "cc");
1546@@ -410,6 +791,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1547 return oldval;
1548 }
1549
1550+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1551+ long long new)
1552+{
1553+ long long oldval;
1554+ unsigned long res;
1555+
1556+ smp_mb();
1557+
1558+ do {
1559+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1560+ "ldrexd %1, %H1, [%3]\n"
1561+ "mov %0, #0\n"
1562+ "teq %1, %4\n"
1563+ "teqeq %H1, %H4\n"
1564+ "strexdeq %0, %5, %H5, [%3]"
1565+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1566+ : "r" (&ptr->counter), "r" (old), "r" (new)
1567+ : "cc");
1568+ } while (res);
1569+
1570+ smp_mb();
1571+
1572+ return oldval;
1573+}
1574+
1575 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1576 {
1577 long long result;
1578@@ -435,21 +841,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1579 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1580 {
1581 long long result;
1582- unsigned long tmp;
1583+ u64 tmp;
1584
1585 smp_mb();
1586 prefetchw(&v->counter);
1587
1588 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1589-"1: ldrexd %0, %H0, [%3]\n"
1590-" subs %Q0, %Q0, #1\n"
1591-" sbc %R0, %R0, #0\n"
1592+"1: ldrexd %1, %H1, [%3]\n"
1593+" subs %Q0, %Q1, #1\n"
1594+" sbcs %R0, %R1, #0\n"
1595+
1596+#ifdef CONFIG_PAX_REFCOUNT
1597+" bvc 3f\n"
1598+" mov %Q0, %Q1\n"
1599+" mov %R0, %R1\n"
1600+"2: " REFCOUNT_TRAP_INSN "\n"
1601+"3:\n"
1602+#endif
1603+
1604 " teq %R0, #0\n"
1605-" bmi 2f\n"
1606+" bmi 4f\n"
1607 " strexd %1, %0, %H0, [%3]\n"
1608 " teq %1, #0\n"
1609 " bne 1b\n"
1610-"2:"
1611+"4:\n"
1612+
1613+#ifdef CONFIG_PAX_REFCOUNT
1614+ _ASM_EXTABLE(2b, 4b)
1615+#endif
1616+
1617 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1618 : "r" (&v->counter)
1619 : "cc");
1620@@ -473,13 +893,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1621 " teq %0, %5\n"
1622 " teqeq %H0, %H5\n"
1623 " moveq %1, #0\n"
1624-" beq 2f\n"
1625+" beq 4f\n"
1626 " adds %Q0, %Q0, %Q6\n"
1627-" adc %R0, %R0, %R6\n"
1628+" adcs %R0, %R0, %R6\n"
1629+
1630+#ifdef CONFIG_PAX_REFCOUNT
1631+" bvc 3f\n"
1632+"2: " REFCOUNT_TRAP_INSN "\n"
1633+"3:\n"
1634+#endif
1635+
1636 " strexd %2, %0, %H0, [%4]\n"
1637 " teq %2, #0\n"
1638 " bne 1b\n"
1639-"2:"
1640+"4:\n"
1641+
1642+#ifdef CONFIG_PAX_REFCOUNT
1643+ _ASM_EXTABLE(2b, 4b)
1644+#endif
1645+
1646 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1647 : "r" (&v->counter), "r" (u), "r" (a)
1648 : "cc");
1649@@ -492,10 +924,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1650
1651 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1652 #define atomic64_inc(v) atomic64_add(1LL, (v))
1653+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1654 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1655+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1656 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1657 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1658 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1659+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1660 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1661 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1662 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1663diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1664index c6a3e73..35cca85 100644
1665--- a/arch/arm/include/asm/barrier.h
1666+++ b/arch/arm/include/asm/barrier.h
1667@@ -63,7 +63,7 @@
1668 do { \
1669 compiletime_assert_atomic_type(*p); \
1670 smp_mb(); \
1671- ACCESS_ONCE(*p) = (v); \
1672+ ACCESS_ONCE_RW(*p) = (v); \
1673 } while (0)
1674
1675 #define smp_load_acquire(p) \
1676diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1677index 75fe66b..ba3dee4 100644
1678--- a/arch/arm/include/asm/cache.h
1679+++ b/arch/arm/include/asm/cache.h
1680@@ -4,8 +4,10 @@
1681 #ifndef __ASMARM_CACHE_H
1682 #define __ASMARM_CACHE_H
1683
1684+#include <linux/const.h>
1685+
1686 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1689
1690 /*
1691 * Memory returned by kmalloc() may be used for DMA, so we must make
1692@@ -24,5 +26,6 @@
1693 #endif
1694
1695 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1696+#define __read_only __attribute__ ((__section__(".data..read_only")))
1697
1698 #endif
1699diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1700index 10e78d0..dc8505d 100644
1701--- a/arch/arm/include/asm/cacheflush.h
1702+++ b/arch/arm/include/asm/cacheflush.h
1703@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1704 void (*dma_unmap_area)(const void *, size_t, int);
1705
1706 void (*dma_flush_range)(const void *, const void *);
1707-};
1708+} __no_const;
1709
1710 /*
1711 * Select the calling method
1712diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1713index 5233151..87a71fa 100644
1714--- a/arch/arm/include/asm/checksum.h
1715+++ b/arch/arm/include/asm/checksum.h
1716@@ -37,7 +37,19 @@ __wsum
1717 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1718
1719 __wsum
1720-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1721+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1722+
1723+static inline __wsum
1724+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1725+{
1726+ __wsum ret;
1727+ pax_open_userland();
1728+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1729+ pax_close_userland();
1730+ return ret;
1731+}
1732+
1733+
1734
1735 /*
1736 * Fold a partial checksum without adding pseudo headers
1737diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1738index abb2c37..96db950 100644
1739--- a/arch/arm/include/asm/cmpxchg.h
1740+++ b/arch/arm/include/asm/cmpxchg.h
1741@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1742
1743 #define xchg(ptr,x) \
1744 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1745+#define xchg_unchecked(ptr,x) \
1746+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1747
1748 #include <asm-generic/cmpxchg-local.h>
1749
1750diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1751index 6ddbe44..b5e38b1 100644
1752--- a/arch/arm/include/asm/domain.h
1753+++ b/arch/arm/include/asm/domain.h
1754@@ -48,18 +48,37 @@
1755 * Domain types
1756 */
1757 #define DOMAIN_NOACCESS 0
1758-#define DOMAIN_CLIENT 1
1759 #ifdef CONFIG_CPU_USE_DOMAINS
1760+#define DOMAIN_USERCLIENT 1
1761+#define DOMAIN_KERNELCLIENT 1
1762 #define DOMAIN_MANAGER 3
1763+#define DOMAIN_VECTORS DOMAIN_USER
1764 #else
1765+
1766+#ifdef CONFIG_PAX_KERNEXEC
1767 #define DOMAIN_MANAGER 1
1768+#define DOMAIN_KERNEXEC 3
1769+#else
1770+#define DOMAIN_MANAGER 1
1771+#endif
1772+
1773+#ifdef CONFIG_PAX_MEMORY_UDEREF
1774+#define DOMAIN_USERCLIENT 0
1775+#define DOMAIN_UDEREF 1
1776+#define DOMAIN_VECTORS DOMAIN_KERNEL
1777+#else
1778+#define DOMAIN_USERCLIENT 1
1779+#define DOMAIN_VECTORS DOMAIN_USER
1780+#endif
1781+#define DOMAIN_KERNELCLIENT 1
1782+
1783 #endif
1784
1785 #define domain_val(dom,type) ((type) << (2*(dom)))
1786
1787 #ifndef __ASSEMBLY__
1788
1789-#ifdef CONFIG_CPU_USE_DOMAINS
1790+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1791 static inline void set_domain(unsigned val)
1792 {
1793 asm volatile(
1794@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1795 isb();
1796 }
1797
1798-#define modify_domain(dom,type) \
1799- do { \
1800- struct thread_info *thread = current_thread_info(); \
1801- unsigned int domain = thread->cpu_domain; \
1802- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1803- thread->cpu_domain = domain | domain_val(dom, type); \
1804- set_domain(thread->cpu_domain); \
1805- } while (0)
1806-
1807+extern void modify_domain(unsigned int dom, unsigned int type);
1808 #else
1809 static inline void set_domain(unsigned val) { }
1810 static inline void modify_domain(unsigned dom, unsigned type) { }
1811diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1812index afb9caf..9a0bac0 100644
1813--- a/arch/arm/include/asm/elf.h
1814+++ b/arch/arm/include/asm/elf.h
1815@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1816 the loader. We need to make sure that it is out of the way of the program
1817 that it will "exec", and that there is sufficient room for the brk. */
1818
1819-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1820+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1821+
1822+#ifdef CONFIG_PAX_ASLR
1823+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1824+
1825+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1826+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1827+#endif
1828
1829 /* When the program starts, a1 contains a pointer to a function to be
1830 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1831@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1832 extern void elf_set_personality(const struct elf32_hdr *);
1833 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1834
1835-struct mm_struct;
1836-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1837-#define arch_randomize_brk arch_randomize_brk
1838-
1839 #ifdef CONFIG_MMU
1840 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1841 struct linux_binprm;
1842diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1843index de53547..52b9a28 100644
1844--- a/arch/arm/include/asm/fncpy.h
1845+++ b/arch/arm/include/asm/fncpy.h
1846@@ -81,7 +81,9 @@
1847 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1848 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1849 \
1850+ pax_open_kernel(); \
1851 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1852+ pax_close_kernel(); \
1853 flush_icache_range((unsigned long)(dest_buf), \
1854 (unsigned long)(dest_buf) + (size)); \
1855 \
1856diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1857index 53e69da..3fdc896 100644
1858--- a/arch/arm/include/asm/futex.h
1859+++ b/arch/arm/include/asm/futex.h
1860@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1861 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1862 return -EFAULT;
1863
1864+ pax_open_userland();
1865+
1866 smp_mb();
1867 /* Prefetching cannot fault */
1868 prefetchw(uaddr);
1869@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1870 : "cc", "memory");
1871 smp_mb();
1872
1873+ pax_close_userland();
1874+
1875 *uval = val;
1876 return ret;
1877 }
1878@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1879 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1880 return -EFAULT;
1881
1882+ pax_open_userland();
1883+
1884 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1885 "1: " TUSER(ldr) " %1, [%4]\n"
1886 " teq %1, %2\n"
1887@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1888 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1889 : "cc", "memory");
1890
1891+ pax_close_userland();
1892+
1893 *uval = val;
1894 return ret;
1895 }
1896@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1897 return -EFAULT;
1898
1899 pagefault_disable(); /* implies preempt_disable() */
1900+ pax_open_userland();
1901
1902 switch (op) {
1903 case FUTEX_OP_SET:
1904@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1905 ret = -ENOSYS;
1906 }
1907
1908+ pax_close_userland();
1909 pagefault_enable(); /* subsumes preempt_enable() */
1910
1911 if (!ret) {
1912diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1913index 83eb2f7..ed77159 100644
1914--- a/arch/arm/include/asm/kmap_types.h
1915+++ b/arch/arm/include/asm/kmap_types.h
1916@@ -4,6 +4,6 @@
1917 /*
1918 * This is the "bare minimum". AIO seems to require this.
1919 */
1920-#define KM_TYPE_NR 16
1921+#define KM_TYPE_NR 17
1922
1923 #endif
1924diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1925index 9e614a1..3302cca 100644
1926--- a/arch/arm/include/asm/mach/dma.h
1927+++ b/arch/arm/include/asm/mach/dma.h
1928@@ -22,7 +22,7 @@ struct dma_ops {
1929 int (*residue)(unsigned int, dma_t *); /* optional */
1930 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1931 const char *type;
1932-};
1933+} __do_const;
1934
1935 struct dma_struct {
1936 void *addr; /* single DMA address */
1937diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1938index f98c7f3..e5c626d 100644
1939--- a/arch/arm/include/asm/mach/map.h
1940+++ b/arch/arm/include/asm/mach/map.h
1941@@ -23,17 +23,19 @@ struct map_desc {
1942
1943 /* types 0-3 are defined in asm/io.h */
1944 enum {
1945- MT_UNCACHED = 4,
1946- MT_CACHECLEAN,
1947- MT_MINICLEAN,
1948+ MT_UNCACHED_RW = 4,
1949+ MT_CACHECLEAN_RO,
1950+ MT_MINICLEAN_RO,
1951 MT_LOW_VECTORS,
1952 MT_HIGH_VECTORS,
1953- MT_MEMORY_RWX,
1954+ __MT_MEMORY_RWX,
1955 MT_MEMORY_RW,
1956- MT_ROM,
1957- MT_MEMORY_RWX_NONCACHED,
1958+ MT_MEMORY_RX,
1959+ MT_ROM_RX,
1960+ MT_MEMORY_RW_NONCACHED,
1961+ MT_MEMORY_RX_NONCACHED,
1962 MT_MEMORY_RW_DTCM,
1963- MT_MEMORY_RWX_ITCM,
1964+ MT_MEMORY_RX_ITCM,
1965 MT_MEMORY_RW_SO,
1966 MT_MEMORY_DMA_READY,
1967 };
1968diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1969index 891a56b..48f337e 100644
1970--- a/arch/arm/include/asm/outercache.h
1971+++ b/arch/arm/include/asm/outercache.h
1972@@ -36,7 +36,7 @@ struct outer_cache_fns {
1973
1974 /* This is an ARM L2C thing */
1975 void (*write_sec)(unsigned long, unsigned);
1976-};
1977+} __no_const;
1978
1979 extern struct outer_cache_fns outer_cache;
1980
1981diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1982index 4355f0e..cd9168e 100644
1983--- a/arch/arm/include/asm/page.h
1984+++ b/arch/arm/include/asm/page.h
1985@@ -23,6 +23,7 @@
1986
1987 #else
1988
1989+#include <linux/compiler.h>
1990 #include <asm/glue.h>
1991
1992 /*
1993@@ -114,7 +115,7 @@ struct cpu_user_fns {
1994 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1995 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1996 unsigned long vaddr, struct vm_area_struct *vma);
1997-};
1998+} __no_const;
1999
2000 #ifdef MULTI_USER
2001 extern struct cpu_user_fns cpu_user;
2002diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2003index 78a7793..e3dc06c 100644
2004--- a/arch/arm/include/asm/pgalloc.h
2005+++ b/arch/arm/include/asm/pgalloc.h
2006@@ -17,6 +17,7 @@
2007 #include <asm/processor.h>
2008 #include <asm/cacheflush.h>
2009 #include <asm/tlbflush.h>
2010+#include <asm/system_info.h>
2011
2012 #define check_pgt_cache() do { } while (0)
2013
2014@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2015 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2016 }
2017
2018+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2019+{
2020+ pud_populate(mm, pud, pmd);
2021+}
2022+
2023 #else /* !CONFIG_ARM_LPAE */
2024
2025 /*
2026@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2027 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2028 #define pmd_free(mm, pmd) do { } while (0)
2029 #define pud_populate(mm,pmd,pte) BUG()
2030+#define pud_populate_kernel(mm,pmd,pte) BUG()
2031
2032 #endif /* CONFIG_ARM_LPAE */
2033
2034@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2035 __free_page(pte);
2036 }
2037
2038+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2039+{
2040+#ifdef CONFIG_ARM_LPAE
2041+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2042+#else
2043+ if (addr & SECTION_SIZE)
2044+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2045+ else
2046+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2047+#endif
2048+ flush_pmd_entry(pmdp);
2049+}
2050+
2051 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2052 pmdval_t prot)
2053 {
2054@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2055 static inline void
2056 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2057 {
2058- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2059+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2060 }
2061 #define pmd_pgtable(pmd) pmd_page(pmd)
2062
2063diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2064index 5cfba15..f415e1a 100644
2065--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2066+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2067@@ -20,12 +20,15 @@
2068 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2069 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2070 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2071+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2072 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2073 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2074 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2075+
2076 /*
2077 * - section
2078 */
2079+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2080 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2081 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2082 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2083@@ -37,6 +40,7 @@
2084 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2085 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2086 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2087+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2088
2089 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2090 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2091@@ -66,6 +70,7 @@
2092 * - extended small page/tiny page
2093 */
2094 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2095+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2096 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2097 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2098 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2099diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2100index 219ac88..73ec32a 100644
2101--- a/arch/arm/include/asm/pgtable-2level.h
2102+++ b/arch/arm/include/asm/pgtable-2level.h
2103@@ -126,6 +126,9 @@
2104 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2105 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2106
2107+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2108+#define L_PTE_PXN (_AT(pteval_t, 0))
2109+
2110 /*
2111 * These are the memory types, defined to be compatible with
2112 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2113diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2114index 9fd61c7..f8f1cff 100644
2115--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2116+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2117@@ -76,6 +76,7 @@
2118 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2119 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2120 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2121+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2122 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2123
2124 /*
2125diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2126index 06e0bc0..c65bca8 100644
2127--- a/arch/arm/include/asm/pgtable-3level.h
2128+++ b/arch/arm/include/asm/pgtable-3level.h
2129@@ -81,6 +81,7 @@
2130 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2131 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2132 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2133+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2134 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2135 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2136 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2137@@ -92,10 +93,12 @@
2138 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2139 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2140 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2141+#define PMD_SECT_RDONLY PMD_SECT_AP2
2142
2143 /*
2144 * To be used in assembly code with the upper page attributes.
2145 */
2146+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2147 #define L_PTE_XN_HIGH (1 << (54 - 32))
2148 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2149
2150diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2151index 01baef0..73c156e 100644
2152--- a/arch/arm/include/asm/pgtable.h
2153+++ b/arch/arm/include/asm/pgtable.h
2154@@ -33,6 +33,9 @@
2155 #include <asm/pgtable-2level.h>
2156 #endif
2157
2158+#define ktla_ktva(addr) (addr)
2159+#define ktva_ktla(addr) (addr)
2160+
2161 /*
2162 * Just any arbitrary offset to the start of the vmalloc VM area: the
2163 * current 8MB value just means that there will be a 8MB "hole" after the
2164@@ -48,6 +51,9 @@
2165 #define LIBRARY_TEXT_START 0x0c000000
2166
2167 #ifndef __ASSEMBLY__
2168+extern pteval_t __supported_pte_mask;
2169+extern pmdval_t __supported_pmd_mask;
2170+
2171 extern void __pte_error(const char *file, int line, pte_t);
2172 extern void __pmd_error(const char *file, int line, pmd_t);
2173 extern void __pgd_error(const char *file, int line, pgd_t);
2174@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2175 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2176 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2177
2178+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2179+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2180+
2181+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2182+#include <asm/domain.h>
2183+#include <linux/thread_info.h>
2184+#include <linux/preempt.h>
2185+
2186+static inline int test_domain(int domain, int domaintype)
2187+{
2188+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2189+}
2190+#endif
2191+
2192+#ifdef CONFIG_PAX_KERNEXEC
2193+static inline unsigned long pax_open_kernel(void) {
2194+#ifdef CONFIG_ARM_LPAE
2195+ /* TODO */
2196+#else
2197+ preempt_disable();
2198+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2199+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2200+#endif
2201+ return 0;
2202+}
2203+
2204+static inline unsigned long pax_close_kernel(void) {
2205+#ifdef CONFIG_ARM_LPAE
2206+ /* TODO */
2207+#else
2208+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2209+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2210+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2211+ preempt_enable_no_resched();
2212+#endif
2213+ return 0;
2214+}
2215+#else
2216+static inline unsigned long pax_open_kernel(void) { return 0; }
2217+static inline unsigned long pax_close_kernel(void) { return 0; }
2218+#endif
2219+
2220 /*
2221 * This is the lowest virtual address we can permit any user space
2222 * mapping to be mapped at. This is particularly important for
2223@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2224 /*
2225 * The pgprot_* and protection_map entries will be fixed up in runtime
2226 * to include the cachable and bufferable bits based on memory policy,
2227- * as well as any architecture dependent bits like global/ASID and SMP
2228- * shared mapping bits.
2229+ * as well as any architecture dependent bits like global/ASID, PXN,
2230+ * and SMP shared mapping bits.
2231 */
2232 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2233
2234@@ -269,7 +317,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2235 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2236 {
2237 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2238- L_PTE_NONE | L_PTE_VALID;
2239+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2240 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2241 return pte;
2242 }
2243diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2244index c25ef3e..735f14b 100644
2245--- a/arch/arm/include/asm/psci.h
2246+++ b/arch/arm/include/asm/psci.h
2247@@ -32,7 +32,7 @@ struct psci_operations {
2248 int (*affinity_info)(unsigned long target_affinity,
2249 unsigned long lowest_affinity_level);
2250 int (*migrate_info_type)(void);
2251-};
2252+} __no_const;
2253
2254 extern struct psci_operations psci_ops;
2255 extern struct smp_operations psci_smp_ops;
2256diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2257index 2ec765c..beb1fe16 100644
2258--- a/arch/arm/include/asm/smp.h
2259+++ b/arch/arm/include/asm/smp.h
2260@@ -113,7 +113,7 @@ struct smp_operations {
2261 int (*cpu_disable)(unsigned int cpu);
2262 #endif
2263 #endif
2264-};
2265+} __no_const;
2266
2267 struct of_cpu_method {
2268 const char *method;
2269diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2270index fc44d37..acc63c4 100644
2271--- a/arch/arm/include/asm/thread_info.h
2272+++ b/arch/arm/include/asm/thread_info.h
2273@@ -89,9 +89,9 @@ struct thread_info {
2274 .flags = 0, \
2275 .preempt_count = INIT_PREEMPT_COUNT, \
2276 .addr_limit = KERNEL_DS, \
2277- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2278- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2279- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2280+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2281+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2282+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2283 .restart_block = { \
2284 .fn = do_no_restart_syscall, \
2285 }, \
2286@@ -165,7 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2287 #define TIF_SYSCALL_AUDIT 9
2288 #define TIF_SYSCALL_TRACEPOINT 10
2289 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2290-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2291+/* within 8 bits of TIF_SYSCALL_TRACE
2292+ * to meet flexible second operand requirements
2293+ */
2294+#define TIF_GRSEC_SETXID 12
2295+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2296 #define TIF_USING_IWMMXT 17
2297 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2298 #define TIF_RESTORE_SIGMASK 20
2299@@ -179,10 +183,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2300 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2301 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2302 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2303+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2304
2305 /* Checks for any syscall work in entry-common.S */
2306 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2307- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2308+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2309
2310 /*
2311 * Change these and you break ASM code in entry-common.S
2312diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2313index 5f833f7..76e6644 100644
2314--- a/arch/arm/include/asm/tls.h
2315+++ b/arch/arm/include/asm/tls.h
2316@@ -3,6 +3,7 @@
2317
2318 #include <linux/compiler.h>
2319 #include <asm/thread_info.h>
2320+#include <asm/pgtable.h>
2321
2322 #ifdef __ASSEMBLY__
2323 #include <asm/asm-offsets.h>
2324@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2325 * at 0xffff0fe0 must be used instead. (see
2326 * entry-armv.S for details)
2327 */
2328+ pax_open_kernel();
2329 *((unsigned int *)0xffff0ff0) = val;
2330+ pax_close_kernel();
2331 #endif
2332 }
2333
2334diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2335index 4767eb9..bf00668 100644
2336--- a/arch/arm/include/asm/uaccess.h
2337+++ b/arch/arm/include/asm/uaccess.h
2338@@ -18,6 +18,7 @@
2339 #include <asm/domain.h>
2340 #include <asm/unified.h>
2341 #include <asm/compiler.h>
2342+#include <asm/pgtable.h>
2343
2344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2345 #include <asm-generic/uaccess-unaligned.h>
2346@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2347 static inline void set_fs(mm_segment_t fs)
2348 {
2349 current_thread_info()->addr_limit = fs;
2350- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2351+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2352 }
2353
2354 #define segment_eq(a,b) ((a) == (b))
2355
2356+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2357+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2358+
2359+static inline void pax_open_userland(void)
2360+{
2361+
2362+#ifdef CONFIG_PAX_MEMORY_UDEREF
2363+ if (segment_eq(get_fs(), USER_DS)) {
2364+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2365+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2366+ }
2367+#endif
2368+
2369+}
2370+
2371+static inline void pax_close_userland(void)
2372+{
2373+
2374+#ifdef CONFIG_PAX_MEMORY_UDEREF
2375+ if (segment_eq(get_fs(), USER_DS)) {
2376+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2377+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2378+ }
2379+#endif
2380+
2381+}
2382+
2383 #define __addr_ok(addr) ({ \
2384 unsigned long flag; \
2385 __asm__("cmp %2, %0; movlo %0, #0" \
2386@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2387
2388 #define get_user(x,p) \
2389 ({ \
2390+ int __e; \
2391 might_fault(); \
2392- __get_user_check(x,p); \
2393+ pax_open_userland(); \
2394+ __e = __get_user_check(x,p); \
2395+ pax_close_userland(); \
2396+ __e; \
2397 })
2398
2399 extern int __put_user_1(void *, unsigned int);
2400@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2401
2402 #define put_user(x,p) \
2403 ({ \
2404+ int __e; \
2405 might_fault(); \
2406- __put_user_check(x,p); \
2407+ pax_open_userland(); \
2408+ __e = __put_user_check(x,p); \
2409+ pax_close_userland(); \
2410+ __e; \
2411 })
2412
2413 #else /* CONFIG_MMU */
2414@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2415
2416 #endif /* CONFIG_MMU */
2417
2418+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2419 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2420
2421 #define user_addr_max() \
2422@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2423 #define __get_user(x,ptr) \
2424 ({ \
2425 long __gu_err = 0; \
2426+ pax_open_userland(); \
2427 __get_user_err((x),(ptr),__gu_err); \
2428+ pax_close_userland(); \
2429 __gu_err; \
2430 })
2431
2432 #define __get_user_error(x,ptr,err) \
2433 ({ \
2434+ pax_open_userland(); \
2435 __get_user_err((x),(ptr),err); \
2436+ pax_close_userland(); \
2437 (void) 0; \
2438 })
2439
2440@@ -368,13 +409,17 @@ do { \
2441 #define __put_user(x,ptr) \
2442 ({ \
2443 long __pu_err = 0; \
2444+ pax_open_userland(); \
2445 __put_user_err((x),(ptr),__pu_err); \
2446+ pax_close_userland(); \
2447 __pu_err; \
2448 })
2449
2450 #define __put_user_error(x,ptr,err) \
2451 ({ \
2452+ pax_open_userland(); \
2453 __put_user_err((x),(ptr),err); \
2454+ pax_close_userland(); \
2455 (void) 0; \
2456 })
2457
2458@@ -474,11 +519,44 @@ do { \
2459
2460
2461 #ifdef CONFIG_MMU
2462-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2463-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2464+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2465+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2466+
2467+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2468+{
2469+ unsigned long ret;
2470+
2471+ check_object_size(to, n, false);
2472+ pax_open_userland();
2473+ ret = ___copy_from_user(to, from, n);
2474+ pax_close_userland();
2475+ return ret;
2476+}
2477+
2478+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2479+{
2480+ unsigned long ret;
2481+
2482+ check_object_size(from, n, true);
2483+ pax_open_userland();
2484+ ret = ___copy_to_user(to, from, n);
2485+ pax_close_userland();
2486+ return ret;
2487+}
2488+
2489 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2490-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2491+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2492 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2493+
2494+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2495+{
2496+ unsigned long ret;
2497+ pax_open_userland();
2498+ ret = ___clear_user(addr, n);
2499+ pax_close_userland();
2500+ return ret;
2501+}
2502+
2503 #else
2504 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2505 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2506@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2507
2508 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2509 {
2510+ if ((long)n < 0)
2511+ return n;
2512+
2513 if (access_ok(VERIFY_READ, from, n))
2514 n = __copy_from_user(to, from, n);
2515 else /* security hole - plug it */
2516@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2517
2518 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2519 {
2520+ if ((long)n < 0)
2521+ return n;
2522+
2523 if (access_ok(VERIFY_WRITE, to, n))
2524 n = __copy_to_user(to, from, n);
2525 return n;
2526diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2527index 5af0ed1..cea83883 100644
2528--- a/arch/arm/include/uapi/asm/ptrace.h
2529+++ b/arch/arm/include/uapi/asm/ptrace.h
2530@@ -92,7 +92,7 @@
2531 * ARMv7 groups of PSR bits
2532 */
2533 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2534-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2535+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2536 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2537 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2538
2539diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2540index a88671c..1cc895e 100644
2541--- a/arch/arm/kernel/armksyms.c
2542+++ b/arch/arm/kernel/armksyms.c
2543@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2544
2545 /* networking */
2546 EXPORT_SYMBOL(csum_partial);
2547-EXPORT_SYMBOL(csum_partial_copy_from_user);
2548+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2549 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2550 EXPORT_SYMBOL(__csum_ipv6_magic);
2551
2552@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2553 #ifdef CONFIG_MMU
2554 EXPORT_SYMBOL(copy_page);
2555
2556-EXPORT_SYMBOL(__copy_from_user);
2557-EXPORT_SYMBOL(__copy_to_user);
2558-EXPORT_SYMBOL(__clear_user);
2559+EXPORT_SYMBOL(___copy_from_user);
2560+EXPORT_SYMBOL(___copy_to_user);
2561+EXPORT_SYMBOL(___clear_user);
2562
2563 EXPORT_SYMBOL(__get_user_1);
2564 EXPORT_SYMBOL(__get_user_2);
2565diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2566index 36276cd..9d7b13b 100644
2567--- a/arch/arm/kernel/entry-armv.S
2568+++ b/arch/arm/kernel/entry-armv.S
2569@@ -47,6 +47,87 @@
2570 9997:
2571 .endm
2572
2573+ .macro pax_enter_kernel
2574+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2575+ @ make aligned space for saved DACR
2576+ sub sp, sp, #8
2577+ @ save regs
2578+ stmdb sp!, {r1, r2}
2579+ @ read DACR from cpu_domain into r1
2580+ mov r2, sp
2581+ @ assume 8K pages, since we have to split the immediate in two
2582+ bic r2, r2, #(0x1fc0)
2583+ bic r2, r2, #(0x3f)
2584+ ldr r1, [r2, #TI_CPU_DOMAIN]
2585+ @ store old DACR on stack
2586+ str r1, [sp, #8]
2587+#ifdef CONFIG_PAX_KERNEXEC
2588+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2589+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2590+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2591+#endif
2592+#ifdef CONFIG_PAX_MEMORY_UDEREF
2593+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2594+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2595+#endif
2596+ @ write r1 to current_thread_info()->cpu_domain
2597+ str r1, [r2, #TI_CPU_DOMAIN]
2598+ @ write r1 to DACR
2599+ mcr p15, 0, r1, c3, c0, 0
2600+ @ instruction sync
2601+ instr_sync
2602+ @ restore regs
2603+ ldmia sp!, {r1, r2}
2604+#endif
2605+ .endm
2606+
2607+ .macro pax_open_userland
2608+#ifdef CONFIG_PAX_MEMORY_UDEREF
2609+ @ save regs
2610+ stmdb sp!, {r0, r1}
2611+ @ read DACR from cpu_domain into r1
2612+ mov r0, sp
2613+ @ assume 8K pages, since we have to split the immediate in two
2614+ bic r0, r0, #(0x1fc0)
2615+ bic r0, r0, #(0x3f)
2616+ ldr r1, [r0, #TI_CPU_DOMAIN]
2617+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2618+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2619+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2620+ @ write r1 to current_thread_info()->cpu_domain
2621+ str r1, [r0, #TI_CPU_DOMAIN]
2622+ @ write r1 to DACR
2623+ mcr p15, 0, r1, c3, c0, 0
2624+ @ instruction sync
2625+ instr_sync
2626+ @ restore regs
2627+ ldmia sp!, {r0, r1}
2628+#endif
2629+ .endm
2630+
2631+ .macro pax_close_userland
2632+#ifdef CONFIG_PAX_MEMORY_UDEREF
2633+ @ save regs
2634+ stmdb sp!, {r0, r1}
2635+ @ read DACR from cpu_domain into r1
2636+ mov r0, sp
2637+ @ assume 8K pages, since we have to split the immediate in two
2638+ bic r0, r0, #(0x1fc0)
2639+ bic r0, r0, #(0x3f)
2640+ ldr r1, [r0, #TI_CPU_DOMAIN]
2641+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2642+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2643+ @ write r1 to current_thread_info()->cpu_domain
2644+ str r1, [r0, #TI_CPU_DOMAIN]
2645+ @ write r1 to DACR
2646+ mcr p15, 0, r1, c3, c0, 0
2647+ @ instruction sync
2648+ instr_sync
2649+ @ restore regs
2650+ ldmia sp!, {r0, r1}
2651+#endif
2652+ .endm
2653+
2654 .macro pabt_helper
2655 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2656 #ifdef MULTI_PABORT
2657@@ -89,11 +170,15 @@
2658 * Invalid mode handlers
2659 */
2660 .macro inv_entry, reason
2661+
2662+ pax_enter_kernel
2663+
2664 sub sp, sp, #S_FRAME_SIZE
2665 ARM( stmib sp, {r1 - lr} )
2666 THUMB( stmia sp, {r0 - r12} )
2667 THUMB( str sp, [sp, #S_SP] )
2668 THUMB( str lr, [sp, #S_LR] )
2669+
2670 mov r1, #\reason
2671 .endm
2672
2673@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2674 .macro svc_entry, stack_hole=0
2675 UNWIND(.fnstart )
2676 UNWIND(.save {r0 - pc} )
2677+
2678+ pax_enter_kernel
2679+
2680 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2681+
2682 #ifdef CONFIG_THUMB2_KERNEL
2683 SPFIX( str r0, [sp] ) @ temporarily saved
2684 SPFIX( mov r0, sp )
2685@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2686 ldmia r0, {r3 - r5}
2687 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2688 mov r6, #-1 @ "" "" "" ""
2689+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2690+ @ offset sp by 8 as done in pax_enter_kernel
2691+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2692+#else
2693 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2694+#endif
2695 SPFIX( addeq r2, r2, #4 )
2696 str r3, [sp, #-4]! @ save the "real" r0 copied
2697 @ from the exception stack
2698@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2699 .macro usr_entry
2700 UNWIND(.fnstart )
2701 UNWIND(.cantunwind ) @ don't unwind the user space
2702+
2703+ pax_enter_kernel_user
2704+
2705 sub sp, sp, #S_FRAME_SIZE
2706 ARM( stmib sp, {r1 - r12} )
2707 THUMB( stmia sp, {r0 - r12} )
2708@@ -421,7 +518,9 @@ __und_usr:
2709 tst r3, #PSR_T_BIT @ Thumb mode?
2710 bne __und_usr_thumb
2711 sub r4, r2, #4 @ ARM instr at LR - 4
2712+ pax_open_userland
2713 1: ldrt r0, [r4]
2714+ pax_close_userland
2715 ARM_BE8(rev r0, r0) @ little endian instruction
2716
2717 @ r0 = 32-bit ARM instruction which caused the exception
2718@@ -455,11 +554,15 @@ __und_usr_thumb:
2719 */
2720 .arch armv6t2
2721 #endif
2722+ pax_open_userland
2723 2: ldrht r5, [r4]
2724+ pax_close_userland
2725 ARM_BE8(rev16 r5, r5) @ little endian instruction
2726 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2727 blo __und_usr_fault_16 @ 16bit undefined instruction
2728+ pax_open_userland
2729 3: ldrht r0, [r2]
2730+ pax_close_userland
2731 ARM_BE8(rev16 r0, r0) @ little endian instruction
2732 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2733 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2734@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
2735 */
2736 .pushsection .fixup, "ax"
2737 .align 2
2738-4: str r4, [sp, #S_PC] @ retry current instruction
2739+4: pax_close_userland
2740+ str r4, [sp, #S_PC] @ retry current instruction
2741 ret r9
2742 .popsection
2743 .pushsection __ex_table,"a"
2744@@ -698,7 +802,7 @@ ENTRY(__switch_to)
2745 THUMB( str lr, [ip], #4 )
2746 ldr r4, [r2, #TI_TP_VALUE]
2747 ldr r5, [r2, #TI_TP_VALUE + 4]
2748-#ifdef CONFIG_CPU_USE_DOMAINS
2749+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2750 ldr r6, [r2, #TI_CPU_DOMAIN]
2751 #endif
2752 switch_tls r1, r4, r5, r3, r7
2753@@ -707,7 +811,7 @@ ENTRY(__switch_to)
2754 ldr r8, =__stack_chk_guard
2755 ldr r7, [r7, #TSK_STACK_CANARY]
2756 #endif
2757-#ifdef CONFIG_CPU_USE_DOMAINS
2758+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2759 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2760 #endif
2761 mov r5, r0
2762diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2763index e52fe5a..1b0a924 100644
2764--- a/arch/arm/kernel/entry-common.S
2765+++ b/arch/arm/kernel/entry-common.S
2766@@ -11,18 +11,46 @@
2767 #include <asm/assembler.h>
2768 #include <asm/unistd.h>
2769 #include <asm/ftrace.h>
2770+#include <asm/domain.h>
2771 #include <asm/unwind.h>
2772
2773+#include "entry-header.S"
2774+
2775 #ifdef CONFIG_NEED_RET_TO_USER
2776 #include <mach/entry-macro.S>
2777 #else
2778 .macro arch_ret_to_user, tmp1, tmp2
2779+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2780+ @ save regs
2781+ stmdb sp!, {r1, r2}
2782+ @ read DACR from cpu_domain into r1
2783+ mov r2, sp
2784+ @ assume 8K pages, since we have to split the immediate in two
2785+ bic r2, r2, #(0x1fc0)
2786+ bic r2, r2, #(0x3f)
2787+ ldr r1, [r2, #TI_CPU_DOMAIN]
2788+#ifdef CONFIG_PAX_KERNEXEC
2789+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2790+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2791+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2792+#endif
2793+#ifdef CONFIG_PAX_MEMORY_UDEREF
2794+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2795+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2796+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2797+#endif
2798+ @ write r1 to current_thread_info()->cpu_domain
2799+ str r1, [r2, #TI_CPU_DOMAIN]
2800+ @ write r1 to DACR
2801+ mcr p15, 0, r1, c3, c0, 0
2802+ @ instruction sync
2803+ instr_sync
2804+ @ restore regs
2805+ ldmia sp!, {r1, r2}
2806+#endif
2807 .endm
2808 #endif
2809
2810-#include "entry-header.S"
2811-
2812-
2813 .align 5
2814 /*
2815 * This is the fast syscall return path. We do as little as
2816@@ -406,6 +434,12 @@ ENTRY(vector_swi)
2817 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2818 #endif
2819
2820+ /*
2821+ * do this here to avoid a performance hit of wrapping the code above
2822+ * that directly dereferences userland to parse the SWI instruction
2823+ */
2824+ pax_enter_kernel_user
2825+
2826 adr tbl, sys_call_table @ load syscall table pointer
2827
2828 #if defined(CONFIG_OABI_COMPAT)
2829diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2830index 2fdf867..6e909e4 100644
2831--- a/arch/arm/kernel/entry-header.S
2832+++ b/arch/arm/kernel/entry-header.S
2833@@ -188,6 +188,60 @@
2834 msr cpsr_c, \rtemp @ switch back to the SVC mode
2835 .endm
2836
2837+ .macro pax_enter_kernel_user
2838+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2839+ @ save regs
2840+ stmdb sp!, {r0, r1}
2841+ @ read DACR from cpu_domain into r1
2842+ mov r0, sp
2843+ @ assume 8K pages, since we have to split the immediate in two
2844+ bic r0, r0, #(0x1fc0)
2845+ bic r0, r0, #(0x3f)
2846+ ldr r1, [r0, #TI_CPU_DOMAIN]
2847+#ifdef CONFIG_PAX_MEMORY_UDEREF
2848+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2849+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2850+#endif
2851+#ifdef CONFIG_PAX_KERNEXEC
2852+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2853+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2854+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2855+#endif
2856+ @ write r1 to current_thread_info()->cpu_domain
2857+ str r1, [r0, #TI_CPU_DOMAIN]
2858+ @ write r1 to DACR
2859+ mcr p15, 0, r1, c3, c0, 0
2860+ @ instruction sync
2861+ instr_sync
2862+ @ restore regs
2863+ ldmia sp!, {r0, r1}
2864+#endif
2865+ .endm
2866+
2867+ .macro pax_exit_kernel
2868+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2869+ @ save regs
2870+ stmdb sp!, {r0, r1}
2871+ @ read old DACR from stack into r1
2872+ ldr r1, [sp, #(8 + S_SP)]
2873+ sub r1, r1, #8
2874+ ldr r1, [r1]
2875+
2876+ @ write r1 to current_thread_info()->cpu_domain
2877+ mov r0, sp
2878+ @ assume 8K pages, since we have to split the immediate in two
2879+ bic r0, r0, #(0x1fc0)
2880+ bic r0, r0, #(0x3f)
2881+ str r1, [r0, #TI_CPU_DOMAIN]
2882+ @ write r1 to DACR
2883+ mcr p15, 0, r1, c3, c0, 0
2884+ @ instruction sync
2885+ instr_sync
2886+ @ restore regs
2887+ ldmia sp!, {r0, r1}
2888+#endif
2889+ .endm
2890+
2891 #ifndef CONFIG_THUMB2_KERNEL
2892 .macro svc_exit, rpsr, irq = 0
2893 .if \irq != 0
2894@@ -207,6 +261,9 @@
2895 blne trace_hardirqs_off
2896 #endif
2897 .endif
2898+
2899+ pax_exit_kernel
2900+
2901 msr spsr_cxsf, \rpsr
2902 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2903 @ We must avoid clrex due to Cortex-A15 erratum #830321
2904@@ -254,6 +311,9 @@
2905 blne trace_hardirqs_off
2906 #endif
2907 .endif
2908+
2909+ pax_exit_kernel
2910+
2911 ldr lr, [sp, #S_SP] @ top of the stack
2912 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2913
2914diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2915index 918875d..cd5fa27 100644
2916--- a/arch/arm/kernel/fiq.c
2917+++ b/arch/arm/kernel/fiq.c
2918@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2919 void *base = vectors_page;
2920 unsigned offset = FIQ_OFFSET;
2921
2922+ pax_open_kernel();
2923 memcpy(base + offset, start, length);
2924+ pax_close_kernel();
2925+
2926 if (!cache_is_vipt_nonaliasing())
2927 flush_icache_range((unsigned long)base + offset, offset +
2928 length);
2929diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2930index 664eee8..f470938 100644
2931--- a/arch/arm/kernel/head.S
2932+++ b/arch/arm/kernel/head.S
2933@@ -437,7 +437,7 @@ __enable_mmu:
2934 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2935 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2936 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2937- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2938+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2939 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2940 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2941 #endif
2942diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2943index 6a4dffe..4a86a70 100644
2944--- a/arch/arm/kernel/module.c
2945+++ b/arch/arm/kernel/module.c
2946@@ -38,12 +38,39 @@
2947 #endif
2948
2949 #ifdef CONFIG_MMU
2950-void *module_alloc(unsigned long size)
2951+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2952 {
2953+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2954+ return NULL;
2955 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2956- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2957+ GFP_KERNEL, prot, NUMA_NO_NODE,
2958 __builtin_return_address(0));
2959 }
2960+
2961+void *module_alloc(unsigned long size)
2962+{
2963+
2964+#ifdef CONFIG_PAX_KERNEXEC
2965+ return __module_alloc(size, PAGE_KERNEL);
2966+#else
2967+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2968+#endif
2969+
2970+}
2971+
2972+#ifdef CONFIG_PAX_KERNEXEC
2973+void module_free_exec(struct module *mod, void *module_region)
2974+{
2975+ module_free(mod, module_region);
2976+}
2977+EXPORT_SYMBOL(module_free_exec);
2978+
2979+void *module_alloc_exec(unsigned long size)
2980+{
2981+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2982+}
2983+EXPORT_SYMBOL(module_alloc_exec);
2984+#endif
2985 #endif
2986
2987 int
2988diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2989index 07314af..c46655c 100644
2990--- a/arch/arm/kernel/patch.c
2991+++ b/arch/arm/kernel/patch.c
2992@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2993 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2994 int size;
2995
2996+ pax_open_kernel();
2997 if (thumb2 && __opcode_is_thumb16(insn)) {
2998 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2999 size = sizeof(u16);
3000@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3001 *(u32 *)addr = insn;
3002 size = sizeof(u32);
3003 }
3004+ pax_close_kernel();
3005
3006 flush_icache_range((uintptr_t)(addr),
3007 (uintptr_t)(addr) + size);
3008diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3009index a35f6eb..7af43a0 100644
3010--- a/arch/arm/kernel/process.c
3011+++ b/arch/arm/kernel/process.c
3012@@ -212,6 +212,7 @@ void machine_power_off(void)
3013
3014 if (pm_power_off)
3015 pm_power_off();
3016+ BUG();
3017 }
3018
3019 /*
3020@@ -225,7 +226,7 @@ void machine_power_off(void)
3021 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3022 * to use. Implementing such co-ordination would be essentially impossible.
3023 */
3024-void machine_restart(char *cmd)
3025+__noreturn void machine_restart(char *cmd)
3026 {
3027 local_irq_disable();
3028 smp_send_stop();
3029@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3030
3031 show_regs_print_info(KERN_DEFAULT);
3032
3033- print_symbol("PC is at %s\n", instruction_pointer(regs));
3034- print_symbol("LR is at %s\n", regs->ARM_lr);
3035+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3036+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3037 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3038 "sp : %08lx ip : %08lx fp : %08lx\n",
3039 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3040@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
3041 return 0;
3042 }
3043
3044-unsigned long arch_randomize_brk(struct mm_struct *mm)
3045-{
3046- unsigned long range_end = mm->brk + 0x02000000;
3047- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3048-}
3049-
3050 #ifdef CONFIG_MMU
3051 #ifdef CONFIG_KUSER_HELPERS
3052 /*
3053@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
3054
3055 static int __init gate_vma_init(void)
3056 {
3057- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3058+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3059 return 0;
3060 }
3061 arch_initcall(gate_vma_init);
3062@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
3063
3064 const char *arch_vma_name(struct vm_area_struct *vma)
3065 {
3066- return is_gate_vma(vma) ? "[vectors]" :
3067- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3068- "[sigpage]" : NULL;
3069+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3070 }
3071
3072-static struct page *signal_page;
3073-extern struct page *get_signal_page(void);
3074-
3075 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3076 {
3077 struct mm_struct *mm = current->mm;
3078- unsigned long addr;
3079- int ret;
3080-
3081- if (!signal_page)
3082- signal_page = get_signal_page();
3083- if (!signal_page)
3084- return -ENOMEM;
3085
3086 down_write(&mm->mmap_sem);
3087- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3088- if (IS_ERR_VALUE(addr)) {
3089- ret = addr;
3090- goto up_fail;
3091- }
3092-
3093- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3094- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3095- &signal_page);
3096-
3097- if (ret == 0)
3098- mm->context.sigpage = addr;
3099-
3100- up_fail:
3101+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3102 up_write(&mm->mmap_sem);
3103- return ret;
3104+ return 0;
3105 }
3106 #endif
3107diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3108index f73891b..cf3004e 100644
3109--- a/arch/arm/kernel/psci.c
3110+++ b/arch/arm/kernel/psci.c
3111@@ -28,7 +28,7 @@
3112 #include <asm/psci.h>
3113 #include <asm/system_misc.h>
3114
3115-struct psci_operations psci_ops;
3116+struct psci_operations psci_ops __read_only;
3117
3118 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3119 typedef int (*psci_initcall_t)(const struct device_node *);
3120diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3121index 0c27ed6..b67388e 100644
3122--- a/arch/arm/kernel/ptrace.c
3123+++ b/arch/arm/kernel/ptrace.c
3124@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3125 regs->ARM_ip = ip;
3126 }
3127
3128+#ifdef CONFIG_GRKERNSEC_SETXID
3129+extern void gr_delayed_cred_worker(void);
3130+#endif
3131+
3132 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3133 {
3134 current_thread_info()->syscall = scno;
3135
3136+#ifdef CONFIG_GRKERNSEC_SETXID
3137+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3138+ gr_delayed_cred_worker();
3139+#endif
3140+
3141 /* Do the secure computing check first; failures should be fast. */
3142 if (secure_computing(scno) == -1)
3143 return -1;
3144diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3145index 84db893d..bd8213a 100644
3146--- a/arch/arm/kernel/setup.c
3147+++ b/arch/arm/kernel/setup.c
3148@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3149 unsigned int elf_hwcap2 __read_mostly;
3150 EXPORT_SYMBOL(elf_hwcap2);
3151
3152+pteval_t __supported_pte_mask __read_only;
3153+pmdval_t __supported_pmd_mask __read_only;
3154
3155 #ifdef MULTI_CPU
3156-struct processor processor __read_mostly;
3157+struct processor processor __read_only;
3158 #endif
3159 #ifdef MULTI_TLB
3160-struct cpu_tlb_fns cpu_tlb __read_mostly;
3161+struct cpu_tlb_fns cpu_tlb __read_only;
3162 #endif
3163 #ifdef MULTI_USER
3164-struct cpu_user_fns cpu_user __read_mostly;
3165+struct cpu_user_fns cpu_user __read_only;
3166 #endif
3167 #ifdef MULTI_CACHE
3168-struct cpu_cache_fns cpu_cache __read_mostly;
3169+struct cpu_cache_fns cpu_cache __read_only;
3170 #endif
3171 #ifdef CONFIG_OUTER_CACHE
3172-struct outer_cache_fns outer_cache __read_mostly;
3173+struct outer_cache_fns outer_cache __read_only;
3174 EXPORT_SYMBOL(outer_cache);
3175 #endif
3176
3177@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3178 asm("mrc p15, 0, %0, c0, c1, 4"
3179 : "=r" (mmfr0));
3180 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3181- (mmfr0 & 0x000000f0) >= 0x00000030)
3182+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3183 cpu_arch = CPU_ARCH_ARMv7;
3184- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3185+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3186+ __supported_pte_mask |= L_PTE_PXN;
3187+ __supported_pmd_mask |= PMD_PXNTABLE;
3188+ }
3189+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3190 (mmfr0 & 0x000000f0) == 0x00000020)
3191 cpu_arch = CPU_ARCH_ARMv6;
3192 else
3193diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3194index bd19834..e4d8c66 100644
3195--- a/arch/arm/kernel/signal.c
3196+++ b/arch/arm/kernel/signal.c
3197@@ -24,8 +24,6 @@
3198
3199 extern const unsigned long sigreturn_codes[7];
3200
3201-static unsigned long signal_return_offset;
3202-
3203 #ifdef CONFIG_CRUNCH
3204 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3205 {
3206@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3207 * except when the MPU has protected the vectors
3208 * page from PL0
3209 */
3210- retcode = mm->context.sigpage + signal_return_offset +
3211- (idx << 2) + thumb;
3212+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3213 } else
3214 #endif
3215 {
3216@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3217 } while (thread_flags & _TIF_WORK_MASK);
3218 return 0;
3219 }
3220-
3221-struct page *get_signal_page(void)
3222-{
3223- unsigned long ptr;
3224- unsigned offset;
3225- struct page *page;
3226- void *addr;
3227-
3228- page = alloc_pages(GFP_KERNEL, 0);
3229-
3230- if (!page)
3231- return NULL;
3232-
3233- addr = page_address(page);
3234-
3235- /* Give the signal return code some randomness */
3236- offset = 0x200 + (get_random_int() & 0x7fc);
3237- signal_return_offset = offset;
3238-
3239- /*
3240- * Copy signal return handlers into the vector page, and
3241- * set sigreturn to be a pointer to these.
3242- */
3243- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3244-
3245- ptr = (unsigned long)addr + offset;
3246- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3247-
3248- return page;
3249-}
3250diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3251index bbe22fc..d7737f5 100644
3252--- a/arch/arm/kernel/smp.c
3253+++ b/arch/arm/kernel/smp.c
3254@@ -76,7 +76,7 @@ enum ipi_msg_type {
3255
3256 static DECLARE_COMPLETION(cpu_running);
3257
3258-static struct smp_operations smp_ops;
3259+static struct smp_operations smp_ops __read_only;
3260
3261 void __init smp_set_ops(struct smp_operations *ops)
3262 {
3263diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3264index 7a3be1d..b00c7de 100644
3265--- a/arch/arm/kernel/tcm.c
3266+++ b/arch/arm/kernel/tcm.c
3267@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3268 .virtual = ITCM_OFFSET,
3269 .pfn = __phys_to_pfn(ITCM_OFFSET),
3270 .length = 0,
3271- .type = MT_MEMORY_RWX_ITCM,
3272+ .type = MT_MEMORY_RX_ITCM,
3273 }
3274 };
3275
3276@@ -267,7 +267,9 @@ no_dtcm:
3277 start = &__sitcm_text;
3278 end = &__eitcm_text;
3279 ram = &__itcm_start;
3280+ pax_open_kernel();
3281 memcpy(start, ram, itcm_code_sz);
3282+ pax_close_kernel();
3283 pr_debug("CPU ITCM: copied code from %p - %p\n",
3284 start, end);
3285 itcm_present = true;
3286diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3287index a964c9f..cf2a5b1 100644
3288--- a/arch/arm/kernel/traps.c
3289+++ b/arch/arm/kernel/traps.c
3290@@ -64,7 +64,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3291 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3292 {
3293 #ifdef CONFIG_KALLSYMS
3294- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3295+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3296 #else
3297 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3298 #endif
3299@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3300 static int die_owner = -1;
3301 static unsigned int die_nest_count;
3302
3303+extern void gr_handle_kernel_exploit(void);
3304+
3305 static unsigned long oops_begin(void)
3306 {
3307 int cpu;
3308@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3309 panic("Fatal exception in interrupt");
3310 if (panic_on_oops)
3311 panic("Fatal exception");
3312+
3313+ gr_handle_kernel_exploit();
3314+
3315 if (signr)
3316 do_exit(signr);
3317 }
3318@@ -887,7 +892,11 @@ void __init early_trap_init(void *vectors_base)
3319 kuser_init(vectors_base);
3320
3321 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3322- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3323+
3324+#ifndef CONFIG_PAX_MEMORY_UDEREF
3325+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3326+#endif
3327+
3328 #else /* ifndef CONFIG_CPU_V7M */
3329 /*
3330 * on V7-M there is no need to copy the vector table to a dedicated
3331diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3332index 6f57cb9..645f8c4 100644
3333--- a/arch/arm/kernel/vmlinux.lds.S
3334+++ b/arch/arm/kernel/vmlinux.lds.S
3335@@ -8,7 +8,11 @@
3336 #include <asm/thread_info.h>
3337 #include <asm/memory.h>
3338 #include <asm/page.h>
3339-
3340+
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+#include <asm/pgtable.h>
3343+#endif
3344+
3345 #define PROC_INFO \
3346 . = ALIGN(4); \
3347 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3348@@ -34,7 +38,7 @@
3349 #endif
3350
3351 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3352- defined(CONFIG_GENERIC_BUG)
3353+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3354 #define ARM_EXIT_KEEP(x) x
3355 #define ARM_EXIT_DISCARD(x)
3356 #else
3357@@ -90,6 +94,11 @@ SECTIONS
3358 _text = .;
3359 HEAD_TEXT
3360 }
3361+
3362+#ifdef CONFIG_PAX_KERNEXEC
3363+ . = ALIGN(1<<SECTION_SHIFT);
3364+#endif
3365+
3366 .text : { /* Real text segment */
3367 _stext = .; /* Text and read-only data */
3368 __exception_text_start = .;
3369@@ -112,6 +121,8 @@ SECTIONS
3370 ARM_CPU_KEEP(PROC_INFO)
3371 }
3372
3373+ _etext = .; /* End of text section */
3374+
3375 RO_DATA(PAGE_SIZE)
3376
3377 . = ALIGN(4);
3378@@ -142,7 +153,9 @@ SECTIONS
3379
3380 NOTES
3381
3382- _etext = .; /* End of text and rodata section */
3383+#ifdef CONFIG_PAX_KERNEXEC
3384+ . = ALIGN(1<<SECTION_SHIFT);
3385+#endif
3386
3387 #ifndef CONFIG_XIP_KERNEL
3388 . = ALIGN(PAGE_SIZE);
3389@@ -220,6 +233,11 @@ SECTIONS
3390 . = PAGE_OFFSET + TEXT_OFFSET;
3391 #else
3392 __init_end = .;
3393+
3394+#ifdef CONFIG_PAX_KERNEXEC
3395+ . = ALIGN(1<<SECTION_SHIFT);
3396+#endif
3397+
3398 . = ALIGN(THREAD_SIZE);
3399 __data_loc = .;
3400 #endif
3401diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3402index a99e0cd..ab56421d 100644
3403--- a/arch/arm/kvm/arm.c
3404+++ b/arch/arm/kvm/arm.c
3405@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3406 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3407
3408 /* The VMID used in the VTTBR */
3409-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3410+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3411 static u8 kvm_next_vmid;
3412 static DEFINE_SPINLOCK(kvm_vmid_lock);
3413
3414@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3415 */
3416 static bool need_new_vmid_gen(struct kvm *kvm)
3417 {
3418- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3419+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3420 }
3421
3422 /**
3423@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3424
3425 /* First user of a new VMID generation? */
3426 if (unlikely(kvm_next_vmid == 0)) {
3427- atomic64_inc(&kvm_vmid_gen);
3428+ atomic64_inc_unchecked(&kvm_vmid_gen);
3429 kvm_next_vmid = 1;
3430
3431 /*
3432@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3433 kvm_call_hyp(__kvm_flush_vm_context);
3434 }
3435
3436- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3437+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3438 kvm->arch.vmid = kvm_next_vmid;
3439 kvm_next_vmid++;
3440
3441@@ -997,7 +997,7 @@ static void check_kvm_target_cpu(void *ret)
3442 /**
3443 * Initialize Hyp-mode and memory mappings on all CPUs.
3444 */
3445-int kvm_arch_init(void *opaque)
3446+int kvm_arch_init(const void *opaque)
3447 {
3448 int err;
3449 int ret, cpu;
3450diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3451index 14a0d98..7771a7d 100644
3452--- a/arch/arm/lib/clear_user.S
3453+++ b/arch/arm/lib/clear_user.S
3454@@ -12,14 +12,14 @@
3455
3456 .text
3457
3458-/* Prototype: int __clear_user(void *addr, size_t sz)
3459+/* Prototype: int ___clear_user(void *addr, size_t sz)
3460 * Purpose : clear some user memory
3461 * Params : addr - user memory address to clear
3462 * : sz - number of bytes to clear
3463 * Returns : number of bytes NOT cleared
3464 */
3465 ENTRY(__clear_user_std)
3466-WEAK(__clear_user)
3467+WEAK(___clear_user)
3468 stmfd sp!, {r1, lr}
3469 mov r2, #0
3470 cmp r1, #4
3471@@ -44,7 +44,7 @@ WEAK(__clear_user)
3472 USER( strnebt r2, [r0])
3473 mov r0, #0
3474 ldmfd sp!, {r1, pc}
3475-ENDPROC(__clear_user)
3476+ENDPROC(___clear_user)
3477 ENDPROC(__clear_user_std)
3478
3479 .pushsection .fixup,"ax"
3480diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3481index 66a477a..bee61d3 100644
3482--- a/arch/arm/lib/copy_from_user.S
3483+++ b/arch/arm/lib/copy_from_user.S
3484@@ -16,7 +16,7 @@
3485 /*
3486 * Prototype:
3487 *
3488- * size_t __copy_from_user(void *to, const void *from, size_t n)
3489+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3490 *
3491 * Purpose:
3492 *
3493@@ -84,11 +84,11 @@
3494
3495 .text
3496
3497-ENTRY(__copy_from_user)
3498+ENTRY(___copy_from_user)
3499
3500 #include "copy_template.S"
3501
3502-ENDPROC(__copy_from_user)
3503+ENDPROC(___copy_from_user)
3504
3505 .pushsection .fixup,"ax"
3506 .align 0
3507diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3508index 6ee2f67..d1cce76 100644
3509--- a/arch/arm/lib/copy_page.S
3510+++ b/arch/arm/lib/copy_page.S
3511@@ -10,6 +10,7 @@
3512 * ASM optimised string functions
3513 */
3514 #include <linux/linkage.h>
3515+#include <linux/const.h>
3516 #include <asm/assembler.h>
3517 #include <asm/asm-offsets.h>
3518 #include <asm/cache.h>
3519diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3520index d066df6..df28194 100644
3521--- a/arch/arm/lib/copy_to_user.S
3522+++ b/arch/arm/lib/copy_to_user.S
3523@@ -16,7 +16,7 @@
3524 /*
3525 * Prototype:
3526 *
3527- * size_t __copy_to_user(void *to, const void *from, size_t n)
3528+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3529 *
3530 * Purpose:
3531 *
3532@@ -88,11 +88,11 @@
3533 .text
3534
3535 ENTRY(__copy_to_user_std)
3536-WEAK(__copy_to_user)
3537+WEAK(___copy_to_user)
3538
3539 #include "copy_template.S"
3540
3541-ENDPROC(__copy_to_user)
3542+ENDPROC(___copy_to_user)
3543 ENDPROC(__copy_to_user_std)
3544
3545 .pushsection .fixup,"ax"
3546diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3547index 7d08b43..f7ca7ea 100644
3548--- a/arch/arm/lib/csumpartialcopyuser.S
3549+++ b/arch/arm/lib/csumpartialcopyuser.S
3550@@ -57,8 +57,8 @@
3551 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3552 */
3553
3554-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3555-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3556+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3557+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3558
3559 #include "csumpartialcopygeneric.S"
3560
3561diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3562index 312d43e..21d2322 100644
3563--- a/arch/arm/lib/delay.c
3564+++ b/arch/arm/lib/delay.c
3565@@ -29,7 +29,7 @@
3566 /*
3567 * Default to the loop-based delay implementation.
3568 */
3569-struct arm_delay_ops arm_delay_ops = {
3570+struct arm_delay_ops arm_delay_ops __read_only = {
3571 .delay = __loop_delay,
3572 .const_udelay = __loop_const_udelay,
3573 .udelay = __loop_udelay,
3574diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3575index 3e58d71..029817c 100644
3576--- a/arch/arm/lib/uaccess_with_memcpy.c
3577+++ b/arch/arm/lib/uaccess_with_memcpy.c
3578@@ -136,7 +136,7 @@ out:
3579 }
3580
3581 unsigned long
3582-__copy_to_user(void __user *to, const void *from, unsigned long n)
3583+___copy_to_user(void __user *to, const void *from, unsigned long n)
3584 {
3585 /*
3586 * This test is stubbed out of the main function above to keep
3587@@ -190,7 +190,7 @@ out:
3588 return n;
3589 }
3590
3591-unsigned long __clear_user(void __user *addr, unsigned long n)
3592+unsigned long ___clear_user(void __user *addr, unsigned long n)
3593 {
3594 /* See rational for this in __copy_to_user() above. */
3595 if (n < 64)
3596diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3597index f7a07a5..258e1f7 100644
3598--- a/arch/arm/mach-at91/setup.c
3599+++ b/arch/arm/mach-at91/setup.c
3600@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3601
3602 desc->pfn = __phys_to_pfn(base);
3603 desc->length = length;
3604- desc->type = MT_MEMORY_RWX_NONCACHED;
3605+ desc->type = MT_MEMORY_RW_NONCACHED;
3606
3607 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3608 base, length, desc->virtual);
3609diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3610index 7f352de..6dc0929 100644
3611--- a/arch/arm/mach-keystone/keystone.c
3612+++ b/arch/arm/mach-keystone/keystone.c
3613@@ -27,7 +27,7 @@
3614
3615 #include "keystone.h"
3616
3617-static struct notifier_block platform_nb;
3618+static notifier_block_no_const platform_nb;
3619 static unsigned long keystone_dma_pfn_offset __read_mostly;
3620
3621 static int keystone_platform_notifier(struct notifier_block *nb,
3622diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3623index 2bdc323..cf1c607 100644
3624--- a/arch/arm/mach-mvebu/coherency.c
3625+++ b/arch/arm/mach-mvebu/coherency.c
3626@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3627
3628 /*
3629 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3630- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3631+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3632 * is needed as a workaround for a deadlock issue between the PCIe
3633 * interface and the cache controller.
3634 */
3635@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3636 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3637
3638 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3639- mtype = MT_UNCACHED;
3640+ mtype = MT_UNCACHED_RW;
3641
3642 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3643 }
3644diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3645index aead77a..a2253fa 100644
3646--- a/arch/arm/mach-omap2/board-n8x0.c
3647+++ b/arch/arm/mach-omap2/board-n8x0.c
3648@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3649 }
3650 #endif
3651
3652-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3653+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3654 .late_init = n8x0_menelaus_late_init,
3655 };
3656
3657diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3658index 2f97228..6ce10e1 100644
3659--- a/arch/arm/mach-omap2/gpmc.c
3660+++ b/arch/arm/mach-omap2/gpmc.c
3661@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
3662 };
3663
3664 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3665-static struct irq_chip gpmc_irq_chip;
3666 static int gpmc_irq_start;
3667
3668 static struct resource gpmc_mem_root;
3669@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3670
3671 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3672
3673+static struct irq_chip gpmc_irq_chip = {
3674+ .name = "gpmc",
3675+ .irq_startup = gpmc_irq_noop_ret,
3676+ .irq_enable = gpmc_irq_enable,
3677+ .irq_disable = gpmc_irq_disable,
3678+ .irq_shutdown = gpmc_irq_noop,
3679+ .irq_ack = gpmc_irq_noop,
3680+ .irq_mask = gpmc_irq_noop,
3681+ .irq_unmask = gpmc_irq_noop,
3682+
3683+};
3684+
3685 static int gpmc_setup_irq(void)
3686 {
3687 int i;
3688@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
3689 return gpmc_irq_start;
3690 }
3691
3692- gpmc_irq_chip.name = "gpmc";
3693- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3694- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3695- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3696- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3697- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3698- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3699- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3700-
3701 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3702 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3703
3704diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3705index 4001325..b14e2a0 100644
3706--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3707+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3708@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3709 int (*finish_suspend)(unsigned long cpu_state);
3710 void (*resume)(void);
3711 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3712-};
3713+} __no_const;
3714
3715 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3716 static struct powerdomain *mpuss_pd;
3717@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3718 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3719 {}
3720
3721-struct cpu_pm_ops omap_pm_ops = {
3722+static struct cpu_pm_ops omap_pm_ops __read_only = {
3723 .finish_suspend = default_finish_suspend,
3724 .resume = dummy_cpu_resume,
3725 .scu_prepare = dummy_scu_prepare,
3726diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3727index 37843a7..a98df13 100644
3728--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3729+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3730@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3731 return NOTIFY_OK;
3732 }
3733
3734-static struct notifier_block __refdata irq_hotplug_notifier = {
3735+static struct notifier_block irq_hotplug_notifier = {
3736 .notifier_call = irq_cpu_hotplug_notify,
3737 };
3738
3739diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3740index d22c30d..23697a1 100644
3741--- a/arch/arm/mach-omap2/omap_device.c
3742+++ b/arch/arm/mach-omap2/omap_device.c
3743@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3744 struct platform_device __init *omap_device_build(const char *pdev_name,
3745 int pdev_id,
3746 struct omap_hwmod *oh,
3747- void *pdata, int pdata_len)
3748+ const void *pdata, int pdata_len)
3749 {
3750 struct omap_hwmod *ohs[] = { oh };
3751
3752@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3753 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3754 int pdev_id,
3755 struct omap_hwmod **ohs,
3756- int oh_cnt, void *pdata,
3757+ int oh_cnt, const void *pdata,
3758 int pdata_len)
3759 {
3760 int ret = -ENOMEM;
3761diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3762index 78c02b3..c94109a 100644
3763--- a/arch/arm/mach-omap2/omap_device.h
3764+++ b/arch/arm/mach-omap2/omap_device.h
3765@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3766 /* Core code interface */
3767
3768 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3769- struct omap_hwmod *oh, void *pdata,
3770+ struct omap_hwmod *oh, const void *pdata,
3771 int pdata_len);
3772
3773 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3774 struct omap_hwmod **oh, int oh_cnt,
3775- void *pdata, int pdata_len);
3776+ const void *pdata, int pdata_len);
3777
3778 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3779 struct omap_hwmod **ohs, int oh_cnt);
3780diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3781index 9e91a4e..357ed0d 100644
3782--- a/arch/arm/mach-omap2/omap_hwmod.c
3783+++ b/arch/arm/mach-omap2/omap_hwmod.c
3784@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3785 int (*init_clkdm)(struct omap_hwmod *oh);
3786 void (*update_context_lost)(struct omap_hwmod *oh);
3787 int (*get_context_lost)(struct omap_hwmod *oh);
3788-};
3789+} __no_const;
3790
3791 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3792-static struct omap_hwmod_soc_ops soc_ops;
3793+static struct omap_hwmod_soc_ops soc_ops __read_only;
3794
3795 /* omap_hwmod_list contains all registered struct omap_hwmods */
3796 static LIST_HEAD(omap_hwmod_list);
3797diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3798index 95fee54..cfa9cf1 100644
3799--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3800+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3801@@ -10,6 +10,7 @@
3802
3803 #include <linux/kernel.h>
3804 #include <linux/init.h>
3805+#include <asm/pgtable.h>
3806
3807 #include "powerdomain.h"
3808
3809@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3810
3811 void __init am43xx_powerdomains_init(void)
3812 {
3813- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3814+ pax_open_kernel();
3815+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3816+ pax_close_kernel();
3817 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3818 pwrdm_register_pwrdms(powerdomains_am43xx);
3819 pwrdm_complete_init();
3820diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3821index 97d6607..8429d14 100644
3822--- a/arch/arm/mach-omap2/wd_timer.c
3823+++ b/arch/arm/mach-omap2/wd_timer.c
3824@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3825 struct omap_hwmod *oh;
3826 char *oh_name = "wd_timer2";
3827 char *dev_name = "omap_wdt";
3828- struct omap_wd_timer_platform_data pdata;
3829+ static struct omap_wd_timer_platform_data pdata = {
3830+ .read_reset_sources = prm_read_reset_sources
3831+ };
3832
3833 if (!cpu_class_is_omap2() || of_have_populated_dt())
3834 return 0;
3835@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3836 return -EINVAL;
3837 }
3838
3839- pdata.read_reset_sources = prm_read_reset_sources;
3840-
3841 pdev = omap_device_build(dev_name, id, oh, &pdata,
3842 sizeof(struct omap_wd_timer_platform_data));
3843 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3844diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3845index b30bf5c..d0825bf 100644
3846--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3847+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3848@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3849 bool entered_lp2 = false;
3850
3851 if (tegra_pending_sgi())
3852- ACCESS_ONCE(abort_flag) = true;
3853+ ACCESS_ONCE_RW(abort_flag) = true;
3854
3855 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3856
3857diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3858index 2dea8b5..6499da2 100644
3859--- a/arch/arm/mach-ux500/setup.h
3860+++ b/arch/arm/mach-ux500/setup.h
3861@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3862 .type = MT_DEVICE, \
3863 }
3864
3865-#define __MEM_DEV_DESC(x, sz) { \
3866- .virtual = IO_ADDRESS(x), \
3867- .pfn = __phys_to_pfn(x), \
3868- .length = sz, \
3869- .type = MT_MEMORY_RWX, \
3870-}
3871-
3872 extern struct smp_operations ux500_smp_ops;
3873 extern void ux500_cpu_die(unsigned int cpu);
3874
3875diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3876index ae69809..2665202 100644
3877--- a/arch/arm/mm/Kconfig
3878+++ b/arch/arm/mm/Kconfig
3879@@ -446,6 +446,7 @@ config CPU_32v5
3880
3881 config CPU_32v6
3882 bool
3883+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3884 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3885
3886 config CPU_32v6K
3887@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3888
3889 config CPU_USE_DOMAINS
3890 bool
3891+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3892 help
3893 This option enables or disables the use of domain switching
3894 via the set_fs() function.
3895@@ -799,6 +801,7 @@ config NEED_KUSER_HELPERS
3896 config KUSER_HELPERS
3897 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3898 default y
3899+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3900 help
3901 Warning: disabling this option may break user programs.
3902
3903@@ -811,7 +814,7 @@ config KUSER_HELPERS
3904 See Documentation/arm/kernel_user_helpers.txt for details.
3905
3906 However, the fixed address nature of these helpers can be used
3907- by ROP (return orientated programming) authors when creating
3908+ by ROP (Return Oriented Programming) authors when creating
3909 exploits.
3910
3911 If all of the binaries and libraries which run on your platform
3912diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3913index 83792f4..c25d36b 100644
3914--- a/arch/arm/mm/alignment.c
3915+++ b/arch/arm/mm/alignment.c
3916@@ -216,10 +216,12 @@ union offset_union {
3917 #define __get16_unaligned_check(ins,val,addr) \
3918 do { \
3919 unsigned int err = 0, v, a = addr; \
3920+ pax_open_userland(); \
3921 __get8_unaligned_check(ins,v,a,err); \
3922 val = v << ((BE) ? 8 : 0); \
3923 __get8_unaligned_check(ins,v,a,err); \
3924 val |= v << ((BE) ? 0 : 8); \
3925+ pax_close_userland(); \
3926 if (err) \
3927 goto fault; \
3928 } while (0)
3929@@ -233,6 +235,7 @@ union offset_union {
3930 #define __get32_unaligned_check(ins,val,addr) \
3931 do { \
3932 unsigned int err = 0, v, a = addr; \
3933+ pax_open_userland(); \
3934 __get8_unaligned_check(ins,v,a,err); \
3935 val = v << ((BE) ? 24 : 0); \
3936 __get8_unaligned_check(ins,v,a,err); \
3937@@ -241,6 +244,7 @@ union offset_union {
3938 val |= v << ((BE) ? 8 : 16); \
3939 __get8_unaligned_check(ins,v,a,err); \
3940 val |= v << ((BE) ? 0 : 24); \
3941+ pax_close_userland(); \
3942 if (err) \
3943 goto fault; \
3944 } while (0)
3945@@ -254,6 +258,7 @@ union offset_union {
3946 #define __put16_unaligned_check(ins,val,addr) \
3947 do { \
3948 unsigned int err = 0, v = val, a = addr; \
3949+ pax_open_userland(); \
3950 __asm__( FIRST_BYTE_16 \
3951 ARM( "1: "ins" %1, [%2], #1\n" ) \
3952 THUMB( "1: "ins" %1, [%2]\n" ) \
3953@@ -273,6 +278,7 @@ union offset_union {
3954 " .popsection\n" \
3955 : "=r" (err), "=&r" (v), "=&r" (a) \
3956 : "0" (err), "1" (v), "2" (a)); \
3957+ pax_close_userland(); \
3958 if (err) \
3959 goto fault; \
3960 } while (0)
3961@@ -286,6 +292,7 @@ union offset_union {
3962 #define __put32_unaligned_check(ins,val,addr) \
3963 do { \
3964 unsigned int err = 0, v = val, a = addr; \
3965+ pax_open_userland(); \
3966 __asm__( FIRST_BYTE_32 \
3967 ARM( "1: "ins" %1, [%2], #1\n" ) \
3968 THUMB( "1: "ins" %1, [%2]\n" ) \
3969@@ -315,6 +322,7 @@ union offset_union {
3970 " .popsection\n" \
3971 : "=r" (err), "=&r" (v), "=&r" (a) \
3972 : "0" (err), "1" (v), "2" (a)); \
3973+ pax_close_userland(); \
3974 if (err) \
3975 goto fault; \
3976 } while (0)
3977diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3978index 5f2c988..221412d 100644
3979--- a/arch/arm/mm/cache-l2x0.c
3980+++ b/arch/arm/mm/cache-l2x0.c
3981@@ -41,7 +41,7 @@ struct l2c_init_data {
3982 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3983 void (*save)(void __iomem *);
3984 struct outer_cache_fns outer_cache;
3985-};
3986+} __do_const;
3987
3988 #define CACHE_LINE_SIZE 32
3989
3990diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3991index 6eb97b3..ac509f6 100644
3992--- a/arch/arm/mm/context.c
3993+++ b/arch/arm/mm/context.c
3994@@ -43,7 +43,7 @@
3995 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3996
3997 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3998-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3999+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4000 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4001
4002 static DEFINE_PER_CPU(atomic64_t, active_asids);
4003@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4004 {
4005 static u32 cur_idx = 1;
4006 u64 asid = atomic64_read(&mm->context.id);
4007- u64 generation = atomic64_read(&asid_generation);
4008+ u64 generation = atomic64_read_unchecked(&asid_generation);
4009
4010 if (asid != 0 && is_reserved_asid(asid)) {
4011 /*
4012@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4013 */
4014 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4015 if (asid == NUM_USER_ASIDS) {
4016- generation = atomic64_add_return(ASID_FIRST_VERSION,
4017+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4018 &asid_generation);
4019 flush_context(cpu);
4020 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4021@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4022 cpu_set_reserved_ttbr0();
4023
4024 asid = atomic64_read(&mm->context.id);
4025- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4026+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4027 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4028 goto switch_mm_fastpath;
4029
4030 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4031 /* Check that our ASID belongs to the current generation. */
4032 asid = atomic64_read(&mm->context.id);
4033- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4034+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4035 asid = new_context(mm, cpu);
4036 atomic64_set(&mm->context.id, asid);
4037 }
4038diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4039index eb8830a..e8ff52e 100644
4040--- a/arch/arm/mm/fault.c
4041+++ b/arch/arm/mm/fault.c
4042@@ -25,6 +25,7 @@
4043 #include <asm/system_misc.h>
4044 #include <asm/system_info.h>
4045 #include <asm/tlbflush.h>
4046+#include <asm/sections.h>
4047
4048 #include "fault.h"
4049
4050@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4051 if (fixup_exception(regs))
4052 return;
4053
4054+#ifdef CONFIG_PAX_MEMORY_UDEREF
4055+ if (addr < TASK_SIZE) {
4056+ if (current->signal->curr_ip)
4057+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4058+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4059+ else
4060+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4061+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4062+ }
4063+#endif
4064+
4065+#ifdef CONFIG_PAX_KERNEXEC
4066+ if ((fsr & FSR_WRITE) &&
4067+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4068+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4069+ {
4070+ if (current->signal->curr_ip)
4071+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4072+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4073+ else
4074+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4075+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4076+ }
4077+#endif
4078+
4079 /*
4080 * No handler, we'll have to terminate things with extreme prejudice.
4081 */
4082@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4083 }
4084 #endif
4085
4086+#ifdef CONFIG_PAX_PAGEEXEC
4087+ if (fsr & FSR_LNX_PF) {
4088+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4089+ do_group_exit(SIGKILL);
4090+ }
4091+#endif
4092+
4093 tsk->thread.address = addr;
4094 tsk->thread.error_code = fsr;
4095 tsk->thread.trap_no = 14;
4096@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4097 }
4098 #endif /* CONFIG_MMU */
4099
4100+#ifdef CONFIG_PAX_PAGEEXEC
4101+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4102+{
4103+ long i;
4104+
4105+ printk(KERN_ERR "PAX: bytes at PC: ");
4106+ for (i = 0; i < 20; i++) {
4107+ unsigned char c;
4108+ if (get_user(c, (__force unsigned char __user *)pc+i))
4109+ printk(KERN_CONT "?? ");
4110+ else
4111+ printk(KERN_CONT "%02x ", c);
4112+ }
4113+ printk("\n");
4114+
4115+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4116+ for (i = -1; i < 20; i++) {
4117+ unsigned long c;
4118+ if (get_user(c, (__force unsigned long __user *)sp+i))
4119+ printk(KERN_CONT "???????? ");
4120+ else
4121+ printk(KERN_CONT "%08lx ", c);
4122+ }
4123+ printk("\n");
4124+}
4125+#endif
4126+
4127 /*
4128 * First Level Translation Fault Handler
4129 *
4130@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4131 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4132 struct siginfo info;
4133
4134+#ifdef CONFIG_PAX_MEMORY_UDEREF
4135+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4136+ if (current->signal->curr_ip)
4137+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4138+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4139+ else
4140+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4141+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4142+ goto die;
4143+ }
4144+#endif
4145+
4146 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4147 return;
4148
4149+die:
4150 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4151 inf->name, fsr, addr);
4152
4153@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4154 ifsr_info[nr].name = name;
4155 }
4156
4157+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4158+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4159+
4160 asmlinkage void __exception
4161 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4162 {
4163 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4164 struct siginfo info;
4165+ unsigned long pc = instruction_pointer(regs);
4166+
4167+ if (user_mode(regs)) {
4168+ unsigned long sigpage = current->mm->context.sigpage;
4169+
4170+ if (sigpage <= pc && pc < sigpage + 7*4) {
4171+ if (pc < sigpage + 3*4)
4172+ sys_sigreturn(regs);
4173+ else
4174+ sys_rt_sigreturn(regs);
4175+ return;
4176+ }
4177+ if (pc == 0xffff0f60UL) {
4178+ /*
4179+ * PaX: __kuser_cmpxchg64 emulation
4180+ */
4181+ // TODO
4182+ //regs->ARM_pc = regs->ARM_lr;
4183+ //return;
4184+ }
4185+ if (pc == 0xffff0fa0UL) {
4186+ /*
4187+ * PaX: __kuser_memory_barrier emulation
4188+ */
4189+ // dmb(); implied by the exception
4190+ regs->ARM_pc = regs->ARM_lr;
4191+ return;
4192+ }
4193+ if (pc == 0xffff0fc0UL) {
4194+ /*
4195+ * PaX: __kuser_cmpxchg emulation
4196+ */
4197+ // TODO
4198+ //long new;
4199+ //int op;
4200+
4201+ //op = FUTEX_OP_SET << 28;
4202+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4203+ //regs->ARM_r0 = old != new;
4204+ //regs->ARM_pc = regs->ARM_lr;
4205+ //return;
4206+ }
4207+ if (pc == 0xffff0fe0UL) {
4208+ /*
4209+ * PaX: __kuser_get_tls emulation
4210+ */
4211+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4212+ regs->ARM_pc = regs->ARM_lr;
4213+ return;
4214+ }
4215+ }
4216+
4217+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4218+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4219+ if (current->signal->curr_ip)
4220+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4221+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4222+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4223+ else
4224+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4225+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4226+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4227+ goto die;
4228+ }
4229+#endif
4230+
4231+#ifdef CONFIG_PAX_REFCOUNT
4232+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4233+#ifdef CONFIG_THUMB2_KERNEL
4234+ unsigned short bkpt;
4235+
4236+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4237+#else
4238+ unsigned int bkpt;
4239+
4240+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4241+#endif
4242+ current->thread.error_code = ifsr;
4243+ current->thread.trap_no = 0;
4244+ pax_report_refcount_overflow(regs);
4245+ fixup_exception(regs);
4246+ return;
4247+ }
4248+ }
4249+#endif
4250
4251 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4252 return;
4253
4254+die:
4255 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4256 inf->name, ifsr, addr);
4257
4258diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4259index cf08bdf..772656c 100644
4260--- a/arch/arm/mm/fault.h
4261+++ b/arch/arm/mm/fault.h
4262@@ -3,6 +3,7 @@
4263
4264 /*
4265 * Fault status register encodings. We steal bit 31 for our own purposes.
4266+ * Set when the FSR value is from an instruction fault.
4267 */
4268 #define FSR_LNX_PF (1 << 31)
4269 #define FSR_WRITE (1 << 11)
4270@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4271 }
4272 #endif
4273
4274+/* valid for LPAE and !LPAE */
4275+static inline int is_xn_fault(unsigned int fsr)
4276+{
4277+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4278+}
4279+
4280+static inline int is_domain_fault(unsigned int fsr)
4281+{
4282+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4283+}
4284+
4285 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4286 unsigned long search_exception_table(unsigned long addr);
4287
4288diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4289index 659c75d..6f8c029 100644
4290--- a/arch/arm/mm/init.c
4291+++ b/arch/arm/mm/init.c
4292@@ -31,6 +31,8 @@
4293 #include <asm/setup.h>
4294 #include <asm/tlb.h>
4295 #include <asm/fixmap.h>
4296+#include <asm/system_info.h>
4297+#include <asm/cp15.h>
4298
4299 #include <asm/mach/arch.h>
4300 #include <asm/mach/map.h>
4301@@ -619,7 +621,46 @@ void free_initmem(void)
4302 {
4303 #ifdef CONFIG_HAVE_TCM
4304 extern char __tcm_start, __tcm_end;
4305+#endif
4306
4307+#ifdef CONFIG_PAX_KERNEXEC
4308+ unsigned long addr;
4309+ pgd_t *pgd;
4310+ pud_t *pud;
4311+ pmd_t *pmd;
4312+ int cpu_arch = cpu_architecture();
4313+ unsigned int cr = get_cr();
4314+
4315+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4316+ /* make pages tables, etc before .text NX */
4317+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4318+ pgd = pgd_offset_k(addr);
4319+ pud = pud_offset(pgd, addr);
4320+ pmd = pmd_offset(pud, addr);
4321+ __section_update(pmd, addr, PMD_SECT_XN);
4322+ }
4323+ /* make init NX */
4324+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4325+ pgd = pgd_offset_k(addr);
4326+ pud = pud_offset(pgd, addr);
4327+ pmd = pmd_offset(pud, addr);
4328+ __section_update(pmd, addr, PMD_SECT_XN);
4329+ }
4330+ /* make kernel code/rodata RX */
4331+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4332+ pgd = pgd_offset_k(addr);
4333+ pud = pud_offset(pgd, addr);
4334+ pmd = pmd_offset(pud, addr);
4335+#ifdef CONFIG_ARM_LPAE
4336+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4337+#else
4338+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4339+#endif
4340+ }
4341+ }
4342+#endif
4343+
4344+#ifdef CONFIG_HAVE_TCM
4345 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4346 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4347 #endif
4348diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4349index d1e5ad7..84dcbf2 100644
4350--- a/arch/arm/mm/ioremap.c
4351+++ b/arch/arm/mm/ioremap.c
4352@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4353 unsigned int mtype;
4354
4355 if (cached)
4356- mtype = MT_MEMORY_RWX;
4357+ mtype = MT_MEMORY_RX;
4358 else
4359- mtype = MT_MEMORY_RWX_NONCACHED;
4360+ mtype = MT_MEMORY_RX_NONCACHED;
4361
4362 return __arm_ioremap_caller(phys_addr, size, mtype,
4363 __builtin_return_address(0));
4364diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4365index 5e85ed3..b10a7ed 100644
4366--- a/arch/arm/mm/mmap.c
4367+++ b/arch/arm/mm/mmap.c
4368@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4369 struct vm_area_struct *vma;
4370 int do_align = 0;
4371 int aliasing = cache_is_vipt_aliasing();
4372+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4373 struct vm_unmapped_area_info info;
4374
4375 /*
4376@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4377 if (len > TASK_SIZE)
4378 return -ENOMEM;
4379
4380+#ifdef CONFIG_PAX_RANDMMAP
4381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4382+#endif
4383+
4384 if (addr) {
4385 if (do_align)
4386 addr = COLOUR_ALIGN(addr, pgoff);
4387@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4388 addr = PAGE_ALIGN(addr);
4389
4390 vma = find_vma(mm, addr);
4391- if (TASK_SIZE - len >= addr &&
4392- (!vma || addr + len <= vma->vm_start))
4393+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4394 return addr;
4395 }
4396
4397@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4398 info.high_limit = TASK_SIZE;
4399 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4400 info.align_offset = pgoff << PAGE_SHIFT;
4401+ info.threadstack_offset = offset;
4402 return vm_unmapped_area(&info);
4403 }
4404
4405@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4406 unsigned long addr = addr0;
4407 int do_align = 0;
4408 int aliasing = cache_is_vipt_aliasing();
4409+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4410 struct vm_unmapped_area_info info;
4411
4412 /*
4413@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4414 return addr;
4415 }
4416
4417+#ifdef CONFIG_PAX_RANDMMAP
4418+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4419+#endif
4420+
4421 /* requesting a specific address */
4422 if (addr) {
4423 if (do_align)
4424@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4425 else
4426 addr = PAGE_ALIGN(addr);
4427 vma = find_vma(mm, addr);
4428- if (TASK_SIZE - len >= addr &&
4429- (!vma || addr + len <= vma->vm_start))
4430+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4431 return addr;
4432 }
4433
4434@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4435 info.high_limit = mm->mmap_base;
4436 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4437 info.align_offset = pgoff << PAGE_SHIFT;
4438+ info.threadstack_offset = offset;
4439 addr = vm_unmapped_area(&info);
4440
4441 /*
4442@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4443 {
4444 unsigned long random_factor = 0UL;
4445
4446+#ifdef CONFIG_PAX_RANDMMAP
4447+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4448+#endif
4449+
4450 /* 8 bits of randomness in 20 address space bits */
4451 if ((current->flags & PF_RANDOMIZE) &&
4452 !(current->personality & ADDR_NO_RANDOMIZE))
4453@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4454
4455 if (mmap_is_legacy()) {
4456 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4457+
4458+#ifdef CONFIG_PAX_RANDMMAP
4459+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4460+ mm->mmap_base += mm->delta_mmap;
4461+#endif
4462+
4463 mm->get_unmapped_area = arch_get_unmapped_area;
4464 } else {
4465 mm->mmap_base = mmap_base(random_factor);
4466+
4467+#ifdef CONFIG_PAX_RANDMMAP
4468+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4469+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4470+#endif
4471+
4472 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4473 }
4474 }
4475diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4476index 8348ed6..b73a807 100644
4477--- a/arch/arm/mm/mmu.c
4478+++ b/arch/arm/mm/mmu.c
4479@@ -40,6 +40,22 @@
4480 #include "mm.h"
4481 #include "tcm.h"
4482
4483+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4484+void modify_domain(unsigned int dom, unsigned int type)
4485+{
4486+ struct thread_info *thread = current_thread_info();
4487+ unsigned int domain = thread->cpu_domain;
4488+ /*
4489+ * DOMAIN_MANAGER might be defined to some other value,
4490+ * use the arch-defined constant
4491+ */
4492+ domain &= ~domain_val(dom, 3);
4493+ thread->cpu_domain = domain | domain_val(dom, type);
4494+ set_domain(thread->cpu_domain);
4495+}
4496+EXPORT_SYMBOL(modify_domain);
4497+#endif
4498+
4499 /*
4500 * empty_zero_page is a special page that is used for
4501 * zero-initialized data and COW.
4502@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4503 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4504 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4505
4506-static struct mem_type mem_types[] = {
4507+#ifdef CONFIG_PAX_KERNEXEC
4508+#define L_PTE_KERNEXEC L_PTE_RDONLY
4509+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4510+#else
4511+#define L_PTE_KERNEXEC L_PTE_DIRTY
4512+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4513+#endif
4514+
4515+static struct mem_type mem_types[] __read_only = {
4516 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4517 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4518 L_PTE_SHARED,
4519@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4520 .prot_sect = PROT_SECT_DEVICE,
4521 .domain = DOMAIN_IO,
4522 },
4523- [MT_UNCACHED] = {
4524+ [MT_UNCACHED_RW] = {
4525 .prot_pte = PROT_PTE_DEVICE,
4526 .prot_l1 = PMD_TYPE_TABLE,
4527 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4528 .domain = DOMAIN_IO,
4529 },
4530- [MT_CACHECLEAN] = {
4531- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4532+ [MT_CACHECLEAN_RO] = {
4533+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4534 .domain = DOMAIN_KERNEL,
4535 },
4536 #ifndef CONFIG_ARM_LPAE
4537- [MT_MINICLEAN] = {
4538- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4539+ [MT_MINICLEAN_RO] = {
4540+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4541 .domain = DOMAIN_KERNEL,
4542 },
4543 #endif
4544@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4545 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4546 L_PTE_RDONLY,
4547 .prot_l1 = PMD_TYPE_TABLE,
4548- .domain = DOMAIN_USER,
4549+ .domain = DOMAIN_VECTORS,
4550 },
4551 [MT_HIGH_VECTORS] = {
4552 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4553 L_PTE_USER | L_PTE_RDONLY,
4554 .prot_l1 = PMD_TYPE_TABLE,
4555- .domain = DOMAIN_USER,
4556+ .domain = DOMAIN_VECTORS,
4557 },
4558- [MT_MEMORY_RWX] = {
4559+ [__MT_MEMORY_RWX] = {
4560 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4561 .prot_l1 = PMD_TYPE_TABLE,
4562 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4563@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4564 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4565 .domain = DOMAIN_KERNEL,
4566 },
4567- [MT_ROM] = {
4568- .prot_sect = PMD_TYPE_SECT,
4569+ [MT_MEMORY_RX] = {
4570+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4571+ .prot_l1 = PMD_TYPE_TABLE,
4572+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4573+ .domain = DOMAIN_KERNEL,
4574+ },
4575+ [MT_ROM_RX] = {
4576+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4577 .domain = DOMAIN_KERNEL,
4578 },
4579- [MT_MEMORY_RWX_NONCACHED] = {
4580+ [MT_MEMORY_RW_NONCACHED] = {
4581 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4582 L_PTE_MT_BUFFERABLE,
4583 .prot_l1 = PMD_TYPE_TABLE,
4584 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4585 .domain = DOMAIN_KERNEL,
4586 },
4587+ [MT_MEMORY_RX_NONCACHED] = {
4588+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4589+ L_PTE_MT_BUFFERABLE,
4590+ .prot_l1 = PMD_TYPE_TABLE,
4591+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4592+ .domain = DOMAIN_KERNEL,
4593+ },
4594 [MT_MEMORY_RW_DTCM] = {
4595 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4596 L_PTE_XN,
4597@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4598 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4599 .domain = DOMAIN_KERNEL,
4600 },
4601- [MT_MEMORY_RWX_ITCM] = {
4602- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4603+ [MT_MEMORY_RX_ITCM] = {
4604+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4605 .prot_l1 = PMD_TYPE_TABLE,
4606+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4607 .domain = DOMAIN_KERNEL,
4608 },
4609 [MT_MEMORY_RW_SO] = {
4610@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
4611 * Mark cache clean areas and XIP ROM read only
4612 * from SVC mode and no access from userspace.
4613 */
4614- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4615- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4616- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4617+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4618+#ifdef CONFIG_PAX_KERNEXEC
4619+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4620+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4621+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622+#endif
4623+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4624+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625 #endif
4626
4627 /*
4628@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
4629 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4630 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4631 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4632- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4633- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4634+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4635+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4636 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4637 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4638+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4639+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4640 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4641- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4642- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4643+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4644+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4645+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4646+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4647 }
4648 }
4649
4650@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
4651 if (cpu_arch >= CPU_ARCH_ARMv6) {
4652 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4653 /* Non-cacheable Normal is XCB = 001 */
4654- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4655+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4656+ PMD_SECT_BUFFERED;
4657+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4658 PMD_SECT_BUFFERED;
4659 } else {
4660 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4661- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4662+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4663+ PMD_SECT_TEX(1);
4664+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4665 PMD_SECT_TEX(1);
4666 }
4667 } else {
4668- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4669+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4670+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4671 }
4672
4673 #ifdef CONFIG_ARM_LPAE
4674@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
4675 vecs_pgprot |= PTE_EXT_AF;
4676 #endif
4677
4678+ user_pgprot |= __supported_pte_mask;
4679+
4680 for (i = 0; i < 16; i++) {
4681 pteval_t v = pgprot_val(protection_map[i]);
4682 protection_map[i] = __pgprot(v | user_pgprot);
4683@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
4684
4685 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4686 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4687- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4688- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4689+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4690+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4691 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4692 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4693+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4694+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4695 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4696- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4697- mem_types[MT_ROM].prot_sect |= cp->pmd;
4698+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4699+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4700+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4701
4702 switch (cp->pmd) {
4703 case PMD_SECT_WT:
4704- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4705+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4706 break;
4707 case PMD_SECT_WB:
4708 case PMD_SECT_WBWA:
4709- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4710+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4711 break;
4712 }
4713 pr_info("Memory policy: %sData cache %s\n",
4714@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
4715 return;
4716 }
4717
4718- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4719+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4720 md->virtual >= PAGE_OFFSET &&
4721 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4722 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
4723@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4724 * called function. This means you can't use any function or debugging
4725 * method which may touch any device, otherwise the kernel _will_ crash.
4726 */
4727+
4728+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4729+
4730 static void __init devicemaps_init(const struct machine_desc *mdesc)
4731 {
4732 struct map_desc map;
4733 unsigned long addr;
4734- void *vectors;
4735
4736- /*
4737- * Allocate the vector page early.
4738- */
4739- vectors = early_alloc(PAGE_SIZE * 2);
4740-
4741- early_trap_init(vectors);
4742+ early_trap_init(&vectors);
4743
4744 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4745 pmd_clear(pmd_off_k(addr));
4746@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4747 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4748 map.virtual = MODULES_VADDR;
4749 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4750- map.type = MT_ROM;
4751+ map.type = MT_ROM_RX;
4752 create_mapping(&map);
4753 #endif
4754
4755@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4756 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4757 map.virtual = FLUSH_BASE;
4758 map.length = SZ_1M;
4759- map.type = MT_CACHECLEAN;
4760+ map.type = MT_CACHECLEAN_RO;
4761 create_mapping(&map);
4762 #endif
4763 #ifdef FLUSH_BASE_MINICACHE
4764 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4765 map.virtual = FLUSH_BASE_MINICACHE;
4766 map.length = SZ_1M;
4767- map.type = MT_MINICLEAN;
4768+ map.type = MT_MINICLEAN_RO;
4769 create_mapping(&map);
4770 #endif
4771
4772@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4773 * location (0xffff0000). If we aren't using high-vectors, also
4774 * create a mapping at the low-vectors virtual address.
4775 */
4776- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4777+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4778 map.virtual = 0xffff0000;
4779 map.length = PAGE_SIZE;
4780 #ifdef CONFIG_KUSER_HELPERS
4781@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4782 static void __init map_lowmem(void)
4783 {
4784 struct memblock_region *reg;
4785+#ifndef CONFIG_PAX_KERNEXEC
4786 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4787 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4788+#endif
4789
4790 /* Map all the lowmem memory banks. */
4791 for_each_memblock(memory, reg) {
4792@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4793 if (start >= end)
4794 break;
4795
4796+#ifdef CONFIG_PAX_KERNEXEC
4797+ map.pfn = __phys_to_pfn(start);
4798+ map.virtual = __phys_to_virt(start);
4799+ map.length = end - start;
4800+
4801+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4802+ struct map_desc kernel;
4803+ struct map_desc initmap;
4804+
4805+ /* when freeing initmem we will make this RW */
4806+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4807+ initmap.virtual = (unsigned long)__init_begin;
4808+ initmap.length = _sdata - __init_begin;
4809+ initmap.type = __MT_MEMORY_RWX;
4810+ create_mapping(&initmap);
4811+
4812+ /* when freeing initmem we will make this RX */
4813+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4814+ kernel.virtual = (unsigned long)_stext;
4815+ kernel.length = __init_begin - _stext;
4816+ kernel.type = __MT_MEMORY_RWX;
4817+ create_mapping(&kernel);
4818+
4819+ if (map.virtual < (unsigned long)_stext) {
4820+ map.length = (unsigned long)_stext - map.virtual;
4821+ map.type = __MT_MEMORY_RWX;
4822+ create_mapping(&map);
4823+ }
4824+
4825+ map.pfn = __phys_to_pfn(__pa(_sdata));
4826+ map.virtual = (unsigned long)_sdata;
4827+ map.length = end - __pa(_sdata);
4828+ }
4829+
4830+ map.type = MT_MEMORY_RW;
4831+ create_mapping(&map);
4832+#else
4833 if (end < kernel_x_start || start >= kernel_x_end) {
4834 map.pfn = __phys_to_pfn(start);
4835 map.virtual = __phys_to_virt(start);
4836 map.length = end - start;
4837- map.type = MT_MEMORY_RWX;
4838+ map.type = __MT_MEMORY_RWX;
4839
4840 create_mapping(&map);
4841 } else {
4842@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
4843 map.pfn = __phys_to_pfn(kernel_x_start);
4844 map.virtual = __phys_to_virt(kernel_x_start);
4845 map.length = kernel_x_end - kernel_x_start;
4846- map.type = MT_MEMORY_RWX;
4847+ map.type = __MT_MEMORY_RWX;
4848
4849 create_mapping(&map);
4850
4851@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
4852 create_mapping(&map);
4853 }
4854 }
4855+#endif
4856 }
4857 }
4858
4859diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4860index a37b989..5c9ae75 100644
4861--- a/arch/arm/net/bpf_jit_32.c
4862+++ b/arch/arm/net/bpf_jit_32.c
4863@@ -71,7 +71,11 @@ struct jit_ctx {
4864 #endif
4865 };
4866
4867+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4868+int bpf_jit_enable __read_only;
4869+#else
4870 int bpf_jit_enable __read_mostly;
4871+#endif
4872
4873 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4874 {
4875@@ -930,5 +934,6 @@ void bpf_jit_free(struct bpf_prog *fp)
4876 {
4877 if (fp->jited)
4878 module_free(NULL, fp->bpf_func);
4879- kfree(fp);
4880+
4881+ bpf_prog_unlock_free(fp);
4882 }
4883diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4884index 5b217f4..c23f40e 100644
4885--- a/arch/arm/plat-iop/setup.c
4886+++ b/arch/arm/plat-iop/setup.c
4887@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4888 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4889 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4890 .length = IOP3XX_PERIPHERAL_SIZE,
4891- .type = MT_UNCACHED,
4892+ .type = MT_UNCACHED_RW,
4893 },
4894 };
4895
4896diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4897index a5bc92d..0bb4730 100644
4898--- a/arch/arm/plat-omap/sram.c
4899+++ b/arch/arm/plat-omap/sram.c
4900@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4901 * Looks like we need to preserve some bootloader code at the
4902 * beginning of SRAM for jumping to flash for reboot to work...
4903 */
4904+ pax_open_kernel();
4905 memset_io(omap_sram_base + omap_sram_skip, 0,
4906 omap_sram_size - omap_sram_skip);
4907+ pax_close_kernel();
4908 }
4909diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4910index ce6d763..cfea917 100644
4911--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4912+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4913@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4914 int (*started)(unsigned ch);
4915 int (*flush)(unsigned ch);
4916 int (*stop)(unsigned ch);
4917-};
4918+} __no_const;
4919
4920 extern void *samsung_dmadev_get_ops(void);
4921 extern void *s3c_dma_get_ops(void);
4922diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4923index 6389d60..b5d3bdd 100644
4924--- a/arch/arm64/include/asm/barrier.h
4925+++ b/arch/arm64/include/asm/barrier.h
4926@@ -41,7 +41,7 @@
4927 do { \
4928 compiletime_assert_atomic_type(*p); \
4929 barrier(); \
4930- ACCESS_ONCE(*p) = (v); \
4931+ ACCESS_ONCE_RW(*p) = (v); \
4932 } while (0)
4933
4934 #define smp_load_acquire(p) \
4935diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4936index 3bf8f4e..5dd5491 100644
4937--- a/arch/arm64/include/asm/uaccess.h
4938+++ b/arch/arm64/include/asm/uaccess.h
4939@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4940 flag; \
4941 })
4942
4943+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4944 #define access_ok(type, addr, size) __range_ok(addr, size)
4945 #define user_addr_max get_fs
4946
4947diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4948index c3a58a1..78fbf54 100644
4949--- a/arch/avr32/include/asm/cache.h
4950+++ b/arch/avr32/include/asm/cache.h
4951@@ -1,8 +1,10 @@
4952 #ifndef __ASM_AVR32_CACHE_H
4953 #define __ASM_AVR32_CACHE_H
4954
4955+#include <linux/const.h>
4956+
4957 #define L1_CACHE_SHIFT 5
4958-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4959+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4960
4961 /*
4962 * Memory returned by kmalloc() may be used for DMA, so we must make
4963diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4964index d232888..87c8df1 100644
4965--- a/arch/avr32/include/asm/elf.h
4966+++ b/arch/avr32/include/asm/elf.h
4967@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4968 the loader. We need to make sure that it is out of the way of the program
4969 that it will "exec", and that there is sufficient room for the brk. */
4970
4971-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4972+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4973
4974+#ifdef CONFIG_PAX_ASLR
4975+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4976+
4977+#define PAX_DELTA_MMAP_LEN 15
4978+#define PAX_DELTA_STACK_LEN 15
4979+#endif
4980
4981 /* This yields a mask that user programs can use to figure out what
4982 instruction set this CPU supports. This could be done in user space,
4983diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4984index 479330b..53717a8 100644
4985--- a/arch/avr32/include/asm/kmap_types.h
4986+++ b/arch/avr32/include/asm/kmap_types.h
4987@@ -2,9 +2,9 @@
4988 #define __ASM_AVR32_KMAP_TYPES_H
4989
4990 #ifdef CONFIG_DEBUG_HIGHMEM
4991-# define KM_TYPE_NR 29
4992+# define KM_TYPE_NR 30
4993 #else
4994-# define KM_TYPE_NR 14
4995+# define KM_TYPE_NR 15
4996 #endif
4997
4998 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4999diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5000index 0eca933..eb78c7b 100644
5001--- a/arch/avr32/mm/fault.c
5002+++ b/arch/avr32/mm/fault.c
5003@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5004
5005 int exception_trace = 1;
5006
5007+#ifdef CONFIG_PAX_PAGEEXEC
5008+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5009+{
5010+ unsigned long i;
5011+
5012+ printk(KERN_ERR "PAX: bytes at PC: ");
5013+ for (i = 0; i < 20; i++) {
5014+ unsigned char c;
5015+ if (get_user(c, (unsigned char *)pc+i))
5016+ printk(KERN_CONT "???????? ");
5017+ else
5018+ printk(KERN_CONT "%02x ", c);
5019+ }
5020+ printk("\n");
5021+}
5022+#endif
5023+
5024 /*
5025 * This routine handles page faults. It determines the address and the
5026 * problem, and then passes it off to one of the appropriate routines.
5027@@ -176,6 +193,16 @@ bad_area:
5028 up_read(&mm->mmap_sem);
5029
5030 if (user_mode(regs)) {
5031+
5032+#ifdef CONFIG_PAX_PAGEEXEC
5033+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5034+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5035+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5036+ do_group_exit(SIGKILL);
5037+ }
5038+ }
5039+#endif
5040+
5041 if (exception_trace && printk_ratelimit())
5042 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5043 "sp %08lx ecr %lu\n",
5044diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5045index 568885a..f8008df 100644
5046--- a/arch/blackfin/include/asm/cache.h
5047+++ b/arch/blackfin/include/asm/cache.h
5048@@ -7,6 +7,7 @@
5049 #ifndef __ARCH_BLACKFIN_CACHE_H
5050 #define __ARCH_BLACKFIN_CACHE_H
5051
5052+#include <linux/const.h>
5053 #include <linux/linkage.h> /* for asmlinkage */
5054
5055 /*
5056@@ -14,7 +15,7 @@
5057 * Blackfin loads 32 bytes for cache
5058 */
5059 #define L1_CACHE_SHIFT 5
5060-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5061+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5062 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5063
5064 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5065diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5066index aea2718..3639a60 100644
5067--- a/arch/cris/include/arch-v10/arch/cache.h
5068+++ b/arch/cris/include/arch-v10/arch/cache.h
5069@@ -1,8 +1,9 @@
5070 #ifndef _ASM_ARCH_CACHE_H
5071 #define _ASM_ARCH_CACHE_H
5072
5073+#include <linux/const.h>
5074 /* Etrax 100LX have 32-byte cache-lines. */
5075-#define L1_CACHE_BYTES 32
5076 #define L1_CACHE_SHIFT 5
5077+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5078
5079 #endif /* _ASM_ARCH_CACHE_H */
5080diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5081index 7caf25d..ee65ac5 100644
5082--- a/arch/cris/include/arch-v32/arch/cache.h
5083+++ b/arch/cris/include/arch-v32/arch/cache.h
5084@@ -1,11 +1,12 @@
5085 #ifndef _ASM_CRIS_ARCH_CACHE_H
5086 #define _ASM_CRIS_ARCH_CACHE_H
5087
5088+#include <linux/const.h>
5089 #include <arch/hwregs/dma.h>
5090
5091 /* A cache-line is 32 bytes. */
5092-#define L1_CACHE_BYTES 32
5093 #define L1_CACHE_SHIFT 5
5094+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5095
5096 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5097
5098diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5099index f6c3a16..cd422a4 100644
5100--- a/arch/frv/include/asm/atomic.h
5101+++ b/arch/frv/include/asm/atomic.h
5102@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5103 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5104 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5105
5106+#define atomic64_read_unchecked(v) atomic64_read(v)
5107+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5108+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5109+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5110+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5111+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5112+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5113+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5114+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5115+
5116 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5117 {
5118 int c, old;
5119diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5120index 2797163..c2a401d 100644
5121--- a/arch/frv/include/asm/cache.h
5122+++ b/arch/frv/include/asm/cache.h
5123@@ -12,10 +12,11 @@
5124 #ifndef __ASM_CACHE_H
5125 #define __ASM_CACHE_H
5126
5127+#include <linux/const.h>
5128
5129 /* bytes per L1 cache line */
5130 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5131-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5132+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5133
5134 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5135 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5136diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5137index 43901f2..0d8b865 100644
5138--- a/arch/frv/include/asm/kmap_types.h
5139+++ b/arch/frv/include/asm/kmap_types.h
5140@@ -2,6 +2,6 @@
5141 #ifndef _ASM_KMAP_TYPES_H
5142 #define _ASM_KMAP_TYPES_H
5143
5144-#define KM_TYPE_NR 17
5145+#define KM_TYPE_NR 18
5146
5147 #endif
5148diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5149index 836f147..4cf23f5 100644
5150--- a/arch/frv/mm/elf-fdpic.c
5151+++ b/arch/frv/mm/elf-fdpic.c
5152@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5153 {
5154 struct vm_area_struct *vma;
5155 struct vm_unmapped_area_info info;
5156+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5157
5158 if (len > TASK_SIZE)
5159 return -ENOMEM;
5160@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5161 if (addr) {
5162 addr = PAGE_ALIGN(addr);
5163 vma = find_vma(current->mm, addr);
5164- if (TASK_SIZE - len >= addr &&
5165- (!vma || addr + len <= vma->vm_start))
5166+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5167 goto success;
5168 }
5169
5170@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5171 info.high_limit = (current->mm->start_stack - 0x00200000);
5172 info.align_mask = 0;
5173 info.align_offset = 0;
5174+ info.threadstack_offset = offset;
5175 addr = vm_unmapped_area(&info);
5176 if (!(addr & ~PAGE_MASK))
5177 goto success;
5178diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5179index 2635117..fa223cb 100644
5180--- a/arch/hexagon/include/asm/cache.h
5181+++ b/arch/hexagon/include/asm/cache.h
5182@@ -21,9 +21,11 @@
5183 #ifndef __ASM_CACHE_H
5184 #define __ASM_CACHE_H
5185
5186+#include <linux/const.h>
5187+
5188 /* Bytes per L1 cache line */
5189-#define L1_CACHE_SHIFT (5)
5190-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5191+#define L1_CACHE_SHIFT 5
5192+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5193
5194 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5195 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5196diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5197index c84c88b..2a6e1ba 100644
5198--- a/arch/ia64/Kconfig
5199+++ b/arch/ia64/Kconfig
5200@@ -549,6 +549,7 @@ source "drivers/sn/Kconfig"
5201 config KEXEC
5202 bool "kexec system call"
5203 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5204+ depends on !GRKERNSEC_KMEM
5205 help
5206 kexec is a system call that implements the ability to shutdown your
5207 current kernel, and to start another kernel. It is like a reboot
5208diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5209index 5441b14..039a446 100644
5210--- a/arch/ia64/Makefile
5211+++ b/arch/ia64/Makefile
5212@@ -99,5 +99,6 @@ endef
5213 archprepare: make_nr_irqs_h FORCE
5214 PHONY += make_nr_irqs_h FORCE
5215
5216+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5217 make_nr_irqs_h: FORCE
5218 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5219diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5220index 0f8bf48..40ea950 100644
5221--- a/arch/ia64/include/asm/atomic.h
5222+++ b/arch/ia64/include/asm/atomic.h
5223@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5224 #define atomic64_inc(v) atomic64_add(1, (v))
5225 #define atomic64_dec(v) atomic64_sub(1, (v))
5226
5227+#define atomic64_read_unchecked(v) atomic64_read(v)
5228+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5229+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5230+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5231+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5232+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5233+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5234+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5235+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5236+
5237 #endif /* _ASM_IA64_ATOMIC_H */
5238diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5239index a48957c..e097b56 100644
5240--- a/arch/ia64/include/asm/barrier.h
5241+++ b/arch/ia64/include/asm/barrier.h
5242@@ -67,7 +67,7 @@
5243 do { \
5244 compiletime_assert_atomic_type(*p); \
5245 barrier(); \
5246- ACCESS_ONCE(*p) = (v); \
5247+ ACCESS_ONCE_RW(*p) = (v); \
5248 } while (0)
5249
5250 #define smp_load_acquire(p) \
5251diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5252index 988254a..e1ee885 100644
5253--- a/arch/ia64/include/asm/cache.h
5254+++ b/arch/ia64/include/asm/cache.h
5255@@ -1,6 +1,7 @@
5256 #ifndef _ASM_IA64_CACHE_H
5257 #define _ASM_IA64_CACHE_H
5258
5259+#include <linux/const.h>
5260
5261 /*
5262 * Copyright (C) 1998-2000 Hewlett-Packard Co
5263@@ -9,7 +10,7 @@
5264
5265 /* Bytes per L1 (data) cache line. */
5266 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5267-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5268+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5269
5270 #ifdef CONFIG_SMP
5271 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5272diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5273index 5a83c5c..4d7f553 100644
5274--- a/arch/ia64/include/asm/elf.h
5275+++ b/arch/ia64/include/asm/elf.h
5276@@ -42,6 +42,13 @@
5277 */
5278 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5279
5280+#ifdef CONFIG_PAX_ASLR
5281+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5282+
5283+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5284+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5285+#endif
5286+
5287 #define PT_IA_64_UNWIND 0x70000001
5288
5289 /* IA-64 relocations: */
5290diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5291index 5767cdf..7462574 100644
5292--- a/arch/ia64/include/asm/pgalloc.h
5293+++ b/arch/ia64/include/asm/pgalloc.h
5294@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5295 pgd_val(*pgd_entry) = __pa(pud);
5296 }
5297
5298+static inline void
5299+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5300+{
5301+ pgd_populate(mm, pgd_entry, pud);
5302+}
5303+
5304 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5305 {
5306 return quicklist_alloc(0, GFP_KERNEL, NULL);
5307@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5308 pud_val(*pud_entry) = __pa(pmd);
5309 }
5310
5311+static inline void
5312+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5313+{
5314+ pud_populate(mm, pud_entry, pmd);
5315+}
5316+
5317 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5318 {
5319 return quicklist_alloc(0, GFP_KERNEL, NULL);
5320diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5321index 7935115..c0eca6a 100644
5322--- a/arch/ia64/include/asm/pgtable.h
5323+++ b/arch/ia64/include/asm/pgtable.h
5324@@ -12,7 +12,7 @@
5325 * David Mosberger-Tang <davidm@hpl.hp.com>
5326 */
5327
5328-
5329+#include <linux/const.h>
5330 #include <asm/mman.h>
5331 #include <asm/page.h>
5332 #include <asm/processor.h>
5333@@ -142,6 +142,17 @@
5334 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5335 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5336 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5337+
5338+#ifdef CONFIG_PAX_PAGEEXEC
5339+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5340+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5341+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5342+#else
5343+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5344+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5345+# define PAGE_COPY_NOEXEC PAGE_COPY
5346+#endif
5347+
5348 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5349 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5350 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5351diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5352index 45698cd..e8e2dbc 100644
5353--- a/arch/ia64/include/asm/spinlock.h
5354+++ b/arch/ia64/include/asm/spinlock.h
5355@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5356 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5357
5358 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5359- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5360+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5361 }
5362
5363 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5364diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5365index 449c8c0..3d4b1e9 100644
5366--- a/arch/ia64/include/asm/uaccess.h
5367+++ b/arch/ia64/include/asm/uaccess.h
5368@@ -70,6 +70,7 @@
5369 && ((segment).seg == KERNEL_DS.seg \
5370 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5371 })
5372+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5373 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5374
5375 /*
5376@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5377 static inline unsigned long
5378 __copy_to_user (void __user *to, const void *from, unsigned long count)
5379 {
5380+ if (count > INT_MAX)
5381+ return count;
5382+
5383+ if (!__builtin_constant_p(count))
5384+ check_object_size(from, count, true);
5385+
5386 return __copy_user(to, (__force void __user *) from, count);
5387 }
5388
5389 static inline unsigned long
5390 __copy_from_user (void *to, const void __user *from, unsigned long count)
5391 {
5392+ if (count > INT_MAX)
5393+ return count;
5394+
5395+ if (!__builtin_constant_p(count))
5396+ check_object_size(to, count, false);
5397+
5398 return __copy_user((__force void __user *) to, from, count);
5399 }
5400
5401@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5402 ({ \
5403 void __user *__cu_to = (to); \
5404 const void *__cu_from = (from); \
5405- long __cu_len = (n); \
5406+ unsigned long __cu_len = (n); \
5407 \
5408- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5409+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5410+ if (!__builtin_constant_p(n)) \
5411+ check_object_size(__cu_from, __cu_len, true); \
5412 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5413+ } \
5414 __cu_len; \
5415 })
5416
5417@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5418 ({ \
5419 void *__cu_to = (to); \
5420 const void __user *__cu_from = (from); \
5421- long __cu_len = (n); \
5422+ unsigned long __cu_len = (n); \
5423 \
5424 __chk_user_ptr(__cu_from); \
5425- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5426+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5427+ if (!__builtin_constant_p(n)) \
5428+ check_object_size(__cu_to, __cu_len, false); \
5429 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5430+ } \
5431 __cu_len; \
5432 })
5433
5434diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5435index 24603be..948052d 100644
5436--- a/arch/ia64/kernel/module.c
5437+++ b/arch/ia64/kernel/module.c
5438@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5439 void
5440 module_free (struct module *mod, void *module_region)
5441 {
5442- if (mod && mod->arch.init_unw_table &&
5443- module_region == mod->module_init) {
5444+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5445 unw_remove_unwind_table(mod->arch.init_unw_table);
5446 mod->arch.init_unw_table = NULL;
5447 }
5448@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5449 }
5450
5451 static inline int
5452+in_init_rx (const struct module *mod, uint64_t addr)
5453+{
5454+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5455+}
5456+
5457+static inline int
5458+in_init_rw (const struct module *mod, uint64_t addr)
5459+{
5460+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5461+}
5462+
5463+static inline int
5464 in_init (const struct module *mod, uint64_t addr)
5465 {
5466- return addr - (uint64_t) mod->module_init < mod->init_size;
5467+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5468+}
5469+
5470+static inline int
5471+in_core_rx (const struct module *mod, uint64_t addr)
5472+{
5473+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5474+}
5475+
5476+static inline int
5477+in_core_rw (const struct module *mod, uint64_t addr)
5478+{
5479+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5480 }
5481
5482 static inline int
5483 in_core (const struct module *mod, uint64_t addr)
5484 {
5485- return addr - (uint64_t) mod->module_core < mod->core_size;
5486+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5487 }
5488
5489 static inline int
5490@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5491 break;
5492
5493 case RV_BDREL:
5494- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5495+ if (in_init_rx(mod, val))
5496+ val -= (uint64_t) mod->module_init_rx;
5497+ else if (in_init_rw(mod, val))
5498+ val -= (uint64_t) mod->module_init_rw;
5499+ else if (in_core_rx(mod, val))
5500+ val -= (uint64_t) mod->module_core_rx;
5501+ else if (in_core_rw(mod, val))
5502+ val -= (uint64_t) mod->module_core_rw;
5503 break;
5504
5505 case RV_LTV:
5506@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5507 * addresses have been selected...
5508 */
5509 uint64_t gp;
5510- if (mod->core_size > MAX_LTOFF)
5511+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5512 /*
5513 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5514 * at the end of the module.
5515 */
5516- gp = mod->core_size - MAX_LTOFF / 2;
5517+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5518 else
5519- gp = mod->core_size / 2;
5520- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5521+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5522+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5523 mod->arch.gp = gp;
5524 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5525 }
5526diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5527index c39c3cd..3c77738 100644
5528--- a/arch/ia64/kernel/palinfo.c
5529+++ b/arch/ia64/kernel/palinfo.c
5530@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5531 return NOTIFY_OK;
5532 }
5533
5534-static struct notifier_block __refdata palinfo_cpu_notifier =
5535+static struct notifier_block palinfo_cpu_notifier =
5536 {
5537 .notifier_call = palinfo_cpu_callback,
5538 .priority = 0,
5539diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5540index 41e33f8..65180b2a 100644
5541--- a/arch/ia64/kernel/sys_ia64.c
5542+++ b/arch/ia64/kernel/sys_ia64.c
5543@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5544 unsigned long align_mask = 0;
5545 struct mm_struct *mm = current->mm;
5546 struct vm_unmapped_area_info info;
5547+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5548
5549 if (len > RGN_MAP_LIMIT)
5550 return -ENOMEM;
5551@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5552 if (REGION_NUMBER(addr) == RGN_HPAGE)
5553 addr = 0;
5554 #endif
5555+
5556+#ifdef CONFIG_PAX_RANDMMAP
5557+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5558+ addr = mm->free_area_cache;
5559+ else
5560+#endif
5561+
5562 if (!addr)
5563 addr = TASK_UNMAPPED_BASE;
5564
5565@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5566 info.high_limit = TASK_SIZE;
5567 info.align_mask = align_mask;
5568 info.align_offset = 0;
5569+ info.threadstack_offset = offset;
5570 return vm_unmapped_area(&info);
5571 }
5572
5573diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5574index 84f8a52..7c76178 100644
5575--- a/arch/ia64/kernel/vmlinux.lds.S
5576+++ b/arch/ia64/kernel/vmlinux.lds.S
5577@@ -192,7 +192,7 @@ SECTIONS {
5578 /* Per-cpu data: */
5579 . = ALIGN(PERCPU_PAGE_SIZE);
5580 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5581- __phys_per_cpu_start = __per_cpu_load;
5582+ __phys_per_cpu_start = per_cpu_load;
5583 /*
5584 * ensure percpu data fits
5585 * into percpu page size
5586diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5587index 7225dad..2a7c8256 100644
5588--- a/arch/ia64/mm/fault.c
5589+++ b/arch/ia64/mm/fault.c
5590@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5591 return pte_present(pte);
5592 }
5593
5594+#ifdef CONFIG_PAX_PAGEEXEC
5595+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5596+{
5597+ unsigned long i;
5598+
5599+ printk(KERN_ERR "PAX: bytes at PC: ");
5600+ for (i = 0; i < 8; i++) {
5601+ unsigned int c;
5602+ if (get_user(c, (unsigned int *)pc+i))
5603+ printk(KERN_CONT "???????? ");
5604+ else
5605+ printk(KERN_CONT "%08x ", c);
5606+ }
5607+ printk("\n");
5608+}
5609+#endif
5610+
5611 # define VM_READ_BIT 0
5612 # define VM_WRITE_BIT 1
5613 # define VM_EXEC_BIT 2
5614@@ -151,8 +168,21 @@ retry:
5615 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5616 goto bad_area;
5617
5618- if ((vma->vm_flags & mask) != mask)
5619+ if ((vma->vm_flags & mask) != mask) {
5620+
5621+#ifdef CONFIG_PAX_PAGEEXEC
5622+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5623+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5624+ goto bad_area;
5625+
5626+ up_read(&mm->mmap_sem);
5627+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5628+ do_group_exit(SIGKILL);
5629+ }
5630+#endif
5631+
5632 goto bad_area;
5633+ }
5634
5635 /*
5636 * If for any reason at all we couldn't handle the fault, make
5637diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5638index 76069c1..c2aa816 100644
5639--- a/arch/ia64/mm/hugetlbpage.c
5640+++ b/arch/ia64/mm/hugetlbpage.c
5641@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5642 unsigned long pgoff, unsigned long flags)
5643 {
5644 struct vm_unmapped_area_info info;
5645+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5646
5647 if (len > RGN_MAP_LIMIT)
5648 return -ENOMEM;
5649@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5650 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5651 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5652 info.align_offset = 0;
5653+ info.threadstack_offset = offset;
5654 return vm_unmapped_area(&info);
5655 }
5656
5657diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5658index 6b33457..88b5124 100644
5659--- a/arch/ia64/mm/init.c
5660+++ b/arch/ia64/mm/init.c
5661@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5662 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5663 vma->vm_end = vma->vm_start + PAGE_SIZE;
5664 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5665+
5666+#ifdef CONFIG_PAX_PAGEEXEC
5667+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5668+ vma->vm_flags &= ~VM_EXEC;
5669+
5670+#ifdef CONFIG_PAX_MPROTECT
5671+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5672+ vma->vm_flags &= ~VM_MAYEXEC;
5673+#endif
5674+
5675+ }
5676+#endif
5677+
5678 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5679 down_write(&current->mm->mmap_sem);
5680 if (insert_vm_struct(current->mm, vma)) {
5681@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5682 gate_vma.vm_start = FIXADDR_USER_START;
5683 gate_vma.vm_end = FIXADDR_USER_END;
5684 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5685- gate_vma.vm_page_prot = __P101;
5686+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5687
5688 return 0;
5689 }
5690diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5691index 40b3ee9..8c2c112 100644
5692--- a/arch/m32r/include/asm/cache.h
5693+++ b/arch/m32r/include/asm/cache.h
5694@@ -1,8 +1,10 @@
5695 #ifndef _ASM_M32R_CACHE_H
5696 #define _ASM_M32R_CACHE_H
5697
5698+#include <linux/const.h>
5699+
5700 /* L1 cache line size */
5701 #define L1_CACHE_SHIFT 4
5702-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5703+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5704
5705 #endif /* _ASM_M32R_CACHE_H */
5706diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5707index 82abd15..d95ae5d 100644
5708--- a/arch/m32r/lib/usercopy.c
5709+++ b/arch/m32r/lib/usercopy.c
5710@@ -14,6 +14,9 @@
5711 unsigned long
5712 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5713 {
5714+ if ((long)n < 0)
5715+ return n;
5716+
5717 prefetch(from);
5718 if (access_ok(VERIFY_WRITE, to, n))
5719 __copy_user(to,from,n);
5720@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5721 unsigned long
5722 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5723 {
5724+ if ((long)n < 0)
5725+ return n;
5726+
5727 prefetchw(to);
5728 if (access_ok(VERIFY_READ, from, n))
5729 __copy_user_zeroing(to,from,n);
5730diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5731index 0395c51..5f26031 100644
5732--- a/arch/m68k/include/asm/cache.h
5733+++ b/arch/m68k/include/asm/cache.h
5734@@ -4,9 +4,11 @@
5735 #ifndef __ARCH_M68K_CACHE_H
5736 #define __ARCH_M68K_CACHE_H
5737
5738+#include <linux/const.h>
5739+
5740 /* bytes per L1 cache line */
5741 #define L1_CACHE_SHIFT 4
5742-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5743+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5744
5745 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5746
5747diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5748index c7591e8..ecef036 100644
5749--- a/arch/metag/include/asm/barrier.h
5750+++ b/arch/metag/include/asm/barrier.h
5751@@ -89,7 +89,7 @@ static inline void fence(void)
5752 do { \
5753 compiletime_assert_atomic_type(*p); \
5754 smp_mb(); \
5755- ACCESS_ONCE(*p) = (v); \
5756+ ACCESS_ONCE_RW(*p) = (v); \
5757 } while (0)
5758
5759 #define smp_load_acquire(p) \
5760diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5761index 3c32075..ae0ae75 100644
5762--- a/arch/metag/mm/hugetlbpage.c
5763+++ b/arch/metag/mm/hugetlbpage.c
5764@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5765 info.high_limit = TASK_SIZE;
5766 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5767 info.align_offset = 0;
5768+ info.threadstack_offset = 0;
5769 return vm_unmapped_area(&info);
5770 }
5771
5772diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5773index 4efe96a..60e8699 100644
5774--- a/arch/microblaze/include/asm/cache.h
5775+++ b/arch/microblaze/include/asm/cache.h
5776@@ -13,11 +13,12 @@
5777 #ifndef _ASM_MICROBLAZE_CACHE_H
5778 #define _ASM_MICROBLAZE_CACHE_H
5779
5780+#include <linux/const.h>
5781 #include <asm/registers.h>
5782
5783 #define L1_CACHE_SHIFT 5
5784 /* word-granular cache in microblaze */
5785-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5786+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5787
5788 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5789
5790diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5791index 574c430..470200d 100644
5792--- a/arch/mips/Kconfig
5793+++ b/arch/mips/Kconfig
5794@@ -2399,6 +2399,7 @@ source "kernel/Kconfig.preempt"
5795
5796 config KEXEC
5797 bool "Kexec system call"
5798+ depends on !GRKERNSEC_KMEM
5799 help
5800 kexec is a system call that implements the ability to shutdown your
5801 current kernel, and to start another kernel. It is like a reboot
5802diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5803index 02f2444..506969c 100644
5804--- a/arch/mips/cavium-octeon/dma-octeon.c
5805+++ b/arch/mips/cavium-octeon/dma-octeon.c
5806@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5807 if (dma_release_from_coherent(dev, order, vaddr))
5808 return;
5809
5810- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5811+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5812 }
5813
5814 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5815diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5816index 37b2bef..02122b8 100644
5817--- a/arch/mips/include/asm/atomic.h
5818+++ b/arch/mips/include/asm/atomic.h
5819@@ -21,15 +21,39 @@
5820 #include <asm/cmpxchg.h>
5821 #include <asm/war.h>
5822
5823+#ifdef CONFIG_GENERIC_ATOMIC64
5824+#include <asm-generic/atomic64.h>
5825+#endif
5826+
5827 #define ATOMIC_INIT(i) { (i) }
5828
5829+#ifdef CONFIG_64BIT
5830+#define _ASM_EXTABLE(from, to) \
5831+" .section __ex_table,\"a\"\n" \
5832+" .dword " #from ", " #to"\n" \
5833+" .previous\n"
5834+#else
5835+#define _ASM_EXTABLE(from, to) \
5836+" .section __ex_table,\"a\"\n" \
5837+" .word " #from ", " #to"\n" \
5838+" .previous\n"
5839+#endif
5840+
5841 /*
5842 * atomic_read - read atomic variable
5843 * @v: pointer of type atomic_t
5844 *
5845 * Atomically reads the value of @v.
5846 */
5847-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5848+static inline int atomic_read(const atomic_t *v)
5849+{
5850+ return (*(volatile const int *) &v->counter);
5851+}
5852+
5853+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5854+{
5855+ return (*(volatile const int *) &v->counter);
5856+}
5857
5858 /*
5859 * atomic_set - set atomic variable
5860@@ -38,7 +62,15 @@
5861 *
5862 * Atomically sets the value of @v to @i.
5863 */
5864-#define atomic_set(v, i) ((v)->counter = (i))
5865+static inline void atomic_set(atomic_t *v, int i)
5866+{
5867+ v->counter = i;
5868+}
5869+
5870+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5871+{
5872+ v->counter = i;
5873+}
5874
5875 /*
5876 * atomic_add - add integer to atomic variable
5877@@ -47,7 +79,67 @@
5878 *
5879 * Atomically adds @i to @v.
5880 */
5881-static __inline__ void atomic_add(int i, atomic_t * v)
5882+static __inline__ void atomic_add(int i, atomic_t *v)
5883+{
5884+ int temp;
5885+
5886+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5887+ __asm__ __volatile__(
5888+ " .set mips3 \n"
5889+ "1: ll %0, %1 # atomic_add \n"
5890+#ifdef CONFIG_PAX_REFCOUNT
5891+ /* Exception on overflow. */
5892+ "2: add %0, %2 \n"
5893+#else
5894+ " addu %0, %2 \n"
5895+#endif
5896+ " sc %0, %1 \n"
5897+ " beqzl %0, 1b \n"
5898+#ifdef CONFIG_PAX_REFCOUNT
5899+ "3: \n"
5900+ _ASM_EXTABLE(2b, 3b)
5901+#endif
5902+ " .set mips0 \n"
5903+ : "=&r" (temp), "+m" (v->counter)
5904+ : "Ir" (i));
5905+ } else if (kernel_uses_llsc) {
5906+ __asm__ __volatile__(
5907+ " .set mips3 \n"
5908+ "1: ll %0, %1 # atomic_add \n"
5909+#ifdef CONFIG_PAX_REFCOUNT
5910+ /* Exception on overflow. */
5911+ "2: add %0, %2 \n"
5912+#else
5913+ " addu %0, %2 \n"
5914+#endif
5915+ " sc %0, %1 \n"
5916+ " beqz %0, 1b \n"
5917+#ifdef CONFIG_PAX_REFCOUNT
5918+ "3: \n"
5919+ _ASM_EXTABLE(2b, 3b)
5920+#endif
5921+ " .set mips0 \n"
5922+ : "=&r" (temp), "+m" (v->counter)
5923+ : "Ir" (i));
5924+ } else {
5925+ unsigned long flags;
5926+
5927+ raw_local_irq_save(flags);
5928+ __asm__ __volatile__(
5929+#ifdef CONFIG_PAX_REFCOUNT
5930+ /* Exception on overflow. */
5931+ "1: add %0, %1 \n"
5932+ "2: \n"
5933+ _ASM_EXTABLE(1b, 2b)
5934+#else
5935+ " addu %0, %1 \n"
5936+#endif
5937+ : "+r" (v->counter) : "Ir" (i));
5938+ raw_local_irq_restore(flags);
5939+ }
5940+}
5941+
5942+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5943 {
5944 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5945 int temp;
5946@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5947 *
5948 * Atomically subtracts @i from @v.
5949 */
5950-static __inline__ void atomic_sub(int i, atomic_t * v)
5951+static __inline__ void atomic_sub(int i, atomic_t *v)
5952+{
5953+ int temp;
5954+
5955+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5956+ __asm__ __volatile__(
5957+ " .set mips3 \n"
5958+ "1: ll %0, %1 # atomic64_sub \n"
5959+#ifdef CONFIG_PAX_REFCOUNT
5960+ /* Exception on overflow. */
5961+ "2: sub %0, %2 \n"
5962+#else
5963+ " subu %0, %2 \n"
5964+#endif
5965+ " sc %0, %1 \n"
5966+ " beqzl %0, 1b \n"
5967+#ifdef CONFIG_PAX_REFCOUNT
5968+ "3: \n"
5969+ _ASM_EXTABLE(2b, 3b)
5970+#endif
5971+ " .set mips0 \n"
5972+ : "=&r" (temp), "+m" (v->counter)
5973+ : "Ir" (i));
5974+ } else if (kernel_uses_llsc) {
5975+ __asm__ __volatile__(
5976+ " .set mips3 \n"
5977+ "1: ll %0, %1 # atomic64_sub \n"
5978+#ifdef CONFIG_PAX_REFCOUNT
5979+ /* Exception on overflow. */
5980+ "2: sub %0, %2 \n"
5981+#else
5982+ " subu %0, %2 \n"
5983+#endif
5984+ " sc %0, %1 \n"
5985+ " beqz %0, 1b \n"
5986+#ifdef CONFIG_PAX_REFCOUNT
5987+ "3: \n"
5988+ _ASM_EXTABLE(2b, 3b)
5989+#endif
5990+ " .set mips0 \n"
5991+ : "=&r" (temp), "+m" (v->counter)
5992+ : "Ir" (i));
5993+ } else {
5994+ unsigned long flags;
5995+
5996+ raw_local_irq_save(flags);
5997+ __asm__ __volatile__(
5998+#ifdef CONFIG_PAX_REFCOUNT
5999+ /* Exception on overflow. */
6000+ "1: sub %0, %1 \n"
6001+ "2: \n"
6002+ _ASM_EXTABLE(1b, 2b)
6003+#else
6004+ " subu %0, %1 \n"
6005+#endif
6006+ : "+r" (v->counter) : "Ir" (i));
6007+ raw_local_irq_restore(flags);
6008+ }
6009+}
6010+
6011+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
6012 {
6013 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6014 int temp;
6015@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6016 /*
6017 * Same as above, but return the result value
6018 */
6019-static __inline__ int atomic_add_return(int i, atomic_t * v)
6020+static __inline__ int atomic_add_return(int i, atomic_t *v)
6021+{
6022+ int result;
6023+ int temp;
6024+
6025+ smp_mb__before_llsc();
6026+
6027+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6028+ __asm__ __volatile__(
6029+ " .set mips3 \n"
6030+ "1: ll %1, %2 # atomic_add_return \n"
6031+#ifdef CONFIG_PAX_REFCOUNT
6032+ "2: add %0, %1, %3 \n"
6033+#else
6034+ " addu %0, %1, %3 \n"
6035+#endif
6036+ " sc %0, %2 \n"
6037+ " beqzl %0, 1b \n"
6038+#ifdef CONFIG_PAX_REFCOUNT
6039+ " b 4f \n"
6040+ " .set noreorder \n"
6041+ "3: b 5f \n"
6042+ " move %0, %1 \n"
6043+ " .set reorder \n"
6044+ _ASM_EXTABLE(2b, 3b)
6045+#endif
6046+ "4: addu %0, %1, %3 \n"
6047+#ifdef CONFIG_PAX_REFCOUNT
6048+ "5: \n"
6049+#endif
6050+ " .set mips0 \n"
6051+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6052+ : "Ir" (i));
6053+ } else if (kernel_uses_llsc) {
6054+ __asm__ __volatile__(
6055+ " .set mips3 \n"
6056+ "1: ll %1, %2 # atomic_add_return \n"
6057+#ifdef CONFIG_PAX_REFCOUNT
6058+ "2: add %0, %1, %3 \n"
6059+#else
6060+ " addu %0, %1, %3 \n"
6061+#endif
6062+ " sc %0, %2 \n"
6063+ " bnez %0, 4f \n"
6064+ " b 1b \n"
6065+#ifdef CONFIG_PAX_REFCOUNT
6066+ " .set noreorder \n"
6067+ "3: b 5f \n"
6068+ " move %0, %1 \n"
6069+ " .set reorder \n"
6070+ _ASM_EXTABLE(2b, 3b)
6071+#endif
6072+ "4: addu %0, %1, %3 \n"
6073+#ifdef CONFIG_PAX_REFCOUNT
6074+ "5: \n"
6075+#endif
6076+ " .set mips0 \n"
6077+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6078+ : "Ir" (i));
6079+ } else {
6080+ unsigned long flags;
6081+
6082+ raw_local_irq_save(flags);
6083+ __asm__ __volatile__(
6084+ " lw %0, %1 \n"
6085+#ifdef CONFIG_PAX_REFCOUNT
6086+ /* Exception on overflow. */
6087+ "1: add %0, %2 \n"
6088+#else
6089+ " addu %0, %2 \n"
6090+#endif
6091+ " sw %0, %1 \n"
6092+#ifdef CONFIG_PAX_REFCOUNT
6093+ /* Note: Dest reg is not modified on overflow */
6094+ "2: \n"
6095+ _ASM_EXTABLE(1b, 2b)
6096+#endif
6097+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6098+ raw_local_irq_restore(flags);
6099+ }
6100+
6101+ smp_llsc_mb();
6102+
6103+ return result;
6104+}
6105+
6106+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6107 {
6108 int result;
6109
6110@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6111 return result;
6112 }
6113
6114-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6115+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6116+{
6117+ int result;
6118+ int temp;
6119+
6120+ smp_mb__before_llsc();
6121+
6122+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6123+ __asm__ __volatile__(
6124+ " .set mips3 \n"
6125+ "1: ll %1, %2 # atomic_sub_return \n"
6126+#ifdef CONFIG_PAX_REFCOUNT
6127+ "2: sub %0, %1, %3 \n"
6128+#else
6129+ " subu %0, %1, %3 \n"
6130+#endif
6131+ " sc %0, %2 \n"
6132+ " beqzl %0, 1b \n"
6133+#ifdef CONFIG_PAX_REFCOUNT
6134+ " b 4f \n"
6135+ " .set noreorder \n"
6136+ "3: b 5f \n"
6137+ " move %0, %1 \n"
6138+ " .set reorder \n"
6139+ _ASM_EXTABLE(2b, 3b)
6140+#endif
6141+ "4: subu %0, %1, %3 \n"
6142+#ifdef CONFIG_PAX_REFCOUNT
6143+ "5: \n"
6144+#endif
6145+ " .set mips0 \n"
6146+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6147+ : "Ir" (i), "m" (v->counter)
6148+ : "memory");
6149+ } else if (kernel_uses_llsc) {
6150+ __asm__ __volatile__(
6151+ " .set mips3 \n"
6152+ "1: ll %1, %2 # atomic_sub_return \n"
6153+#ifdef CONFIG_PAX_REFCOUNT
6154+ "2: sub %0, %1, %3 \n"
6155+#else
6156+ " subu %0, %1, %3 \n"
6157+#endif
6158+ " sc %0, %2 \n"
6159+ " bnez %0, 4f \n"
6160+ " b 1b \n"
6161+#ifdef CONFIG_PAX_REFCOUNT
6162+ " .set noreorder \n"
6163+ "3: b 5f \n"
6164+ " move %0, %1 \n"
6165+ " .set reorder \n"
6166+ _ASM_EXTABLE(2b, 3b)
6167+#endif
6168+ "4: subu %0, %1, %3 \n"
6169+#ifdef CONFIG_PAX_REFCOUNT
6170+ "5: \n"
6171+#endif
6172+ " .set mips0 \n"
6173+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6174+ : "Ir" (i));
6175+ } else {
6176+ unsigned long flags;
6177+
6178+ raw_local_irq_save(flags);
6179+ __asm__ __volatile__(
6180+ " lw %0, %1 \n"
6181+#ifdef CONFIG_PAX_REFCOUNT
6182+ /* Exception on overflow. */
6183+ "1: sub %0, %2 \n"
6184+#else
6185+ " subu %0, %2 \n"
6186+#endif
6187+ " sw %0, %1 \n"
6188+#ifdef CONFIG_PAX_REFCOUNT
6189+ /* Note: Dest reg is not modified on overflow */
6190+ "2: \n"
6191+ _ASM_EXTABLE(1b, 2b)
6192+#endif
6193+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6194+ raw_local_irq_restore(flags);
6195+ }
6196+
6197+ smp_llsc_mb();
6198+
6199+ return result;
6200+}
6201+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6202 {
6203 int result;
6204
6205@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6206 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6207 * The function returns the old value of @v minus @i.
6208 */
6209-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6210+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6211 {
6212 int result;
6213
6214@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6215 return result;
6216 }
6217
6218-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6219-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6220+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6221+{
6222+ return cmpxchg(&v->counter, old, new);
6223+}
6224+
6225+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6226+ int new)
6227+{
6228+ return cmpxchg(&(v->counter), old, new);
6229+}
6230+
6231+static inline int atomic_xchg(atomic_t *v, int new)
6232+{
6233+ return xchg(&v->counter, new);
6234+}
6235+
6236+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6237+{
6238+ return xchg(&(v->counter), new);
6239+}
6240
6241 /**
6242 * __atomic_add_unless - add unless the number is a given value
6243@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6244
6245 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6246 #define atomic_inc_return(v) atomic_add_return(1, (v))
6247+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6248+{
6249+ return atomic_add_return_unchecked(1, v);
6250+}
6251
6252 /*
6253 * atomic_sub_and_test - subtract value from variable and test result
6254@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6255 * other cases.
6256 */
6257 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6258+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6259+{
6260+ return atomic_add_return_unchecked(1, v) == 0;
6261+}
6262
6263 /*
6264 * atomic_dec_and_test - decrement by 1 and test
6265@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6266 * Atomically increments @v by 1.
6267 */
6268 #define atomic_inc(v) atomic_add(1, (v))
6269+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6270+{
6271+ atomic_add_unchecked(1, v);
6272+}
6273
6274 /*
6275 * atomic_dec - decrement and test
6276@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6277 * Atomically decrements @v by 1.
6278 */
6279 #define atomic_dec(v) atomic_sub(1, (v))
6280+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6281+{
6282+ atomic_sub_unchecked(1, v);
6283+}
6284
6285 /*
6286 * atomic_add_negative - add and test if negative
6287@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6288 * @v: pointer of type atomic64_t
6289 *
6290 */
6291-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6292+static inline long atomic64_read(const atomic64_t *v)
6293+{
6294+ return (*(volatile const long *) &v->counter);
6295+}
6296+
6297+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6298+{
6299+ return (*(volatile const long *) &v->counter);
6300+}
6301
6302 /*
6303 * atomic64_set - set atomic variable
6304 * @v: pointer of type atomic64_t
6305 * @i: required value
6306 */
6307-#define atomic64_set(v, i) ((v)->counter = (i))
6308+static inline void atomic64_set(atomic64_t *v, long i)
6309+{
6310+ v->counter = i;
6311+}
6312+
6313+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6314+{
6315+ v->counter = i;
6316+}
6317
6318 /*
6319 * atomic64_add - add integer to atomic variable
6320@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6321 *
6322 * Atomically adds @i to @v.
6323 */
6324-static __inline__ void atomic64_add(long i, atomic64_t * v)
6325+static __inline__ void atomic64_add(long i, atomic64_t *v)
6326+{
6327+ long temp;
6328+
6329+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6330+ __asm__ __volatile__(
6331+ " .set mips3 \n"
6332+ "1: lld %0, %1 # atomic64_add \n"
6333+#ifdef CONFIG_PAX_REFCOUNT
6334+ /* Exception on overflow. */
6335+ "2: dadd %0, %2 \n"
6336+#else
6337+ " daddu %0, %2 \n"
6338+#endif
6339+ " scd %0, %1 \n"
6340+ " beqzl %0, 1b \n"
6341+#ifdef CONFIG_PAX_REFCOUNT
6342+ "3: \n"
6343+ _ASM_EXTABLE(2b, 3b)
6344+#endif
6345+ " .set mips0 \n"
6346+ : "=&r" (temp), "+m" (v->counter)
6347+ : "Ir" (i));
6348+ } else if (kernel_uses_llsc) {
6349+ __asm__ __volatile__(
6350+ " .set mips3 \n"
6351+ "1: lld %0, %1 # atomic64_add \n"
6352+#ifdef CONFIG_PAX_REFCOUNT
6353+ /* Exception on overflow. */
6354+ "2: dadd %0, %2 \n"
6355+#else
6356+ " daddu %0, %2 \n"
6357+#endif
6358+ " scd %0, %1 \n"
6359+ " beqz %0, 1b \n"
6360+#ifdef CONFIG_PAX_REFCOUNT
6361+ "3: \n"
6362+ _ASM_EXTABLE(2b, 3b)
6363+#endif
6364+ " .set mips0 \n"
6365+ : "=&r" (temp), "+m" (v->counter)
6366+ : "Ir" (i));
6367+ } else {
6368+ unsigned long flags;
6369+
6370+ raw_local_irq_save(flags);
6371+ __asm__ __volatile__(
6372+#ifdef CONFIG_PAX_REFCOUNT
6373+ /* Exception on overflow. */
6374+ "1: dadd %0, %1 \n"
6375+ "2: \n"
6376+ _ASM_EXTABLE(1b, 2b)
6377+#else
6378+ " daddu %0, %1 \n"
6379+#endif
6380+ : "+r" (v->counter) : "Ir" (i));
6381+ raw_local_irq_restore(flags);
6382+ }
6383+}
6384+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6385 {
6386 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6387 long temp;
6388@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6389 *
6390 * Atomically subtracts @i from @v.
6391 */
6392-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6393+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6394+{
6395+ long temp;
6396+
6397+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6398+ __asm__ __volatile__(
6399+ " .set mips3 \n"
6400+ "1: lld %0, %1 # atomic64_sub \n"
6401+#ifdef CONFIG_PAX_REFCOUNT
6402+ /* Exception on overflow. */
6403+ "2: dsub %0, %2 \n"
6404+#else
6405+ " dsubu %0, %2 \n"
6406+#endif
6407+ " scd %0, %1 \n"
6408+ " beqzl %0, 1b \n"
6409+#ifdef CONFIG_PAX_REFCOUNT
6410+ "3: \n"
6411+ _ASM_EXTABLE(2b, 3b)
6412+#endif
6413+ " .set mips0 \n"
6414+ : "=&r" (temp), "+m" (v->counter)
6415+ : "Ir" (i));
6416+ } else if (kernel_uses_llsc) {
6417+ __asm__ __volatile__(
6418+ " .set mips3 \n"
6419+ "1: lld %0, %1 # atomic64_sub \n"
6420+#ifdef CONFIG_PAX_REFCOUNT
6421+ /* Exception on overflow. */
6422+ "2: dsub %0, %2 \n"
6423+#else
6424+ " dsubu %0, %2 \n"
6425+#endif
6426+ " scd %0, %1 \n"
6427+ " beqz %0, 1b \n"
6428+#ifdef CONFIG_PAX_REFCOUNT
6429+ "3: \n"
6430+ _ASM_EXTABLE(2b, 3b)
6431+#endif
6432+ " .set mips0 \n"
6433+ : "=&r" (temp), "+m" (v->counter)
6434+ : "Ir" (i));
6435+ } else {
6436+ unsigned long flags;
6437+
6438+ raw_local_irq_save(flags);
6439+ __asm__ __volatile__(
6440+#ifdef CONFIG_PAX_REFCOUNT
6441+ /* Exception on overflow. */
6442+ "1: dsub %0, %1 \n"
6443+ "2: \n"
6444+ _ASM_EXTABLE(1b, 2b)
6445+#else
6446+ " dsubu %0, %1 \n"
6447+#endif
6448+ : "+r" (v->counter) : "Ir" (i));
6449+ raw_local_irq_restore(flags);
6450+ }
6451+}
6452+
6453+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6454 {
6455 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6456 long temp;
6457@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6458 /*
6459 * Same as above, but return the result value
6460 */
6461-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6462+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6463+{
6464+ long result;
6465+ long temp;
6466+
6467+ smp_mb__before_llsc();
6468+
6469+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6470+ __asm__ __volatile__(
6471+ " .set mips3 \n"
6472+ "1: lld %1, %2 # atomic64_add_return \n"
6473+#ifdef CONFIG_PAX_REFCOUNT
6474+ "2: dadd %0, %1, %3 \n"
6475+#else
6476+ " daddu %0, %1, %3 \n"
6477+#endif
6478+ " scd %0, %2 \n"
6479+ " beqzl %0, 1b \n"
6480+#ifdef CONFIG_PAX_REFCOUNT
6481+ " b 4f \n"
6482+ " .set noreorder \n"
6483+ "3: b 5f \n"
6484+ " move %0, %1 \n"
6485+ " .set reorder \n"
6486+ _ASM_EXTABLE(2b, 3b)
6487+#endif
6488+ "4: daddu %0, %1, %3 \n"
6489+#ifdef CONFIG_PAX_REFCOUNT
6490+ "5: \n"
6491+#endif
6492+ " .set mips0 \n"
6493+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6494+ : "Ir" (i));
6495+ } else if (kernel_uses_llsc) {
6496+ __asm__ __volatile__(
6497+ " .set mips3 \n"
6498+ "1: lld %1, %2 # atomic64_add_return \n"
6499+#ifdef CONFIG_PAX_REFCOUNT
6500+ "2: dadd %0, %1, %3 \n"
6501+#else
6502+ " daddu %0, %1, %3 \n"
6503+#endif
6504+ " scd %0, %2 \n"
6505+ " bnez %0, 4f \n"
6506+ " b 1b \n"
6507+#ifdef CONFIG_PAX_REFCOUNT
6508+ " .set noreorder \n"
6509+ "3: b 5f \n"
6510+ " move %0, %1 \n"
6511+ " .set reorder \n"
6512+ _ASM_EXTABLE(2b, 3b)
6513+#endif
6514+ "4: daddu %0, %1, %3 \n"
6515+#ifdef CONFIG_PAX_REFCOUNT
6516+ "5: \n"
6517+#endif
6518+ " .set mips0 \n"
6519+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6520+ : "Ir" (i), "m" (v->counter)
6521+ : "memory");
6522+ } else {
6523+ unsigned long flags;
6524+
6525+ raw_local_irq_save(flags);
6526+ __asm__ __volatile__(
6527+ " ld %0, %1 \n"
6528+#ifdef CONFIG_PAX_REFCOUNT
6529+ /* Exception on overflow. */
6530+ "1: dadd %0, %2 \n"
6531+#else
6532+ " daddu %0, %2 \n"
6533+#endif
6534+ " sd %0, %1 \n"
6535+#ifdef CONFIG_PAX_REFCOUNT
6536+ /* Note: Dest reg is not modified on overflow */
6537+ "2: \n"
6538+ _ASM_EXTABLE(1b, 2b)
6539+#endif
6540+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6541+ raw_local_irq_restore(flags);
6542+ }
6543+
6544+ smp_llsc_mb();
6545+
6546+ return result;
6547+}
6548+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6549 {
6550 long result;
6551
6552@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6553 return result;
6554 }
6555
6556-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6557+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6558+{
6559+ long result;
6560+ long temp;
6561+
6562+ smp_mb__before_llsc();
6563+
6564+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6565+ long temp;
6566+
6567+ __asm__ __volatile__(
6568+ " .set mips3 \n"
6569+ "1: lld %1, %2 # atomic64_sub_return \n"
6570+#ifdef CONFIG_PAX_REFCOUNT
6571+ "2: dsub %0, %1, %3 \n"
6572+#else
6573+ " dsubu %0, %1, %3 \n"
6574+#endif
6575+ " scd %0, %2 \n"
6576+ " beqzl %0, 1b \n"
6577+#ifdef CONFIG_PAX_REFCOUNT
6578+ " b 4f \n"
6579+ " .set noreorder \n"
6580+ "3: b 5f \n"
6581+ " move %0, %1 \n"
6582+ " .set reorder \n"
6583+ _ASM_EXTABLE(2b, 3b)
6584+#endif
6585+ "4: dsubu %0, %1, %3 \n"
6586+#ifdef CONFIG_PAX_REFCOUNT
6587+ "5: \n"
6588+#endif
6589+ " .set mips0 \n"
6590+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6591+ : "Ir" (i), "m" (v->counter)
6592+ : "memory");
6593+ } else if (kernel_uses_llsc) {
6594+ __asm__ __volatile__(
6595+ " .set mips3 \n"
6596+ "1: lld %1, %2 # atomic64_sub_return \n"
6597+#ifdef CONFIG_PAX_REFCOUNT
6598+ "2: dsub %0, %1, %3 \n"
6599+#else
6600+ " dsubu %0, %1, %3 \n"
6601+#endif
6602+ " scd %0, %2 \n"
6603+ " bnez %0, 4f \n"
6604+ " b 1b \n"
6605+#ifdef CONFIG_PAX_REFCOUNT
6606+ " .set noreorder \n"
6607+ "3: b 5f \n"
6608+ " move %0, %1 \n"
6609+ " .set reorder \n"
6610+ _ASM_EXTABLE(2b, 3b)
6611+#endif
6612+ "4: dsubu %0, %1, %3 \n"
6613+#ifdef CONFIG_PAX_REFCOUNT
6614+ "5: \n"
6615+#endif
6616+ " .set mips0 \n"
6617+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6618+ : "Ir" (i), "m" (v->counter)
6619+ : "memory");
6620+ } else {
6621+ unsigned long flags;
6622+
6623+ raw_local_irq_save(flags);
6624+ __asm__ __volatile__(
6625+ " ld %0, %1 \n"
6626+#ifdef CONFIG_PAX_REFCOUNT
6627+ /* Exception on overflow. */
6628+ "1: dsub %0, %2 \n"
6629+#else
6630+ " dsubu %0, %2 \n"
6631+#endif
6632+ " sd %0, %1 \n"
6633+#ifdef CONFIG_PAX_REFCOUNT
6634+ /* Note: Dest reg is not modified on overflow */
6635+ "2: \n"
6636+ _ASM_EXTABLE(1b, 2b)
6637+#endif
6638+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6639+ raw_local_irq_restore(flags);
6640+ }
6641+
6642+ smp_llsc_mb();
6643+
6644+ return result;
6645+}
6646+
6647+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6648 {
6649 long result;
6650
6651@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6652 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6653 * The function returns the old value of @v minus @i.
6654 */
6655-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6656+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6657 {
6658 long result;
6659
6660@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6661 return result;
6662 }
6663
6664-#define atomic64_cmpxchg(v, o, n) \
6665- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6666-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6667+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6668+{
6669+ return cmpxchg(&v->counter, old, new);
6670+}
6671+
6672+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6673+ long new)
6674+{
6675+ return cmpxchg(&(v->counter), old, new);
6676+}
6677+
6678+static inline long atomic64_xchg(atomic64_t *v, long new)
6679+{
6680+ return xchg(&v->counter, new);
6681+}
6682+
6683+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6684+{
6685+ return xchg(&(v->counter), new);
6686+}
6687
6688 /**
6689 * atomic64_add_unless - add unless the number is a given value
6690@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6691
6692 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6693 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6694+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6695
6696 /*
6697 * atomic64_sub_and_test - subtract value from variable and test result
6698@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6699 * other cases.
6700 */
6701 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6702+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6703
6704 /*
6705 * atomic64_dec_and_test - decrement by 1 and test
6706@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6707 * Atomically increments @v by 1.
6708 */
6709 #define atomic64_inc(v) atomic64_add(1, (v))
6710+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6711
6712 /*
6713 * atomic64_dec - decrement and test
6714@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6715 * Atomically decrements @v by 1.
6716 */
6717 #define atomic64_dec(v) atomic64_sub(1, (v))
6718+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6719
6720 /*
6721 * atomic64_add_negative - add and test if negative
6722diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6723index d0101dd..266982c 100644
6724--- a/arch/mips/include/asm/barrier.h
6725+++ b/arch/mips/include/asm/barrier.h
6726@@ -184,7 +184,7 @@
6727 do { \
6728 compiletime_assert_atomic_type(*p); \
6729 smp_mb(); \
6730- ACCESS_ONCE(*p) = (v); \
6731+ ACCESS_ONCE_RW(*p) = (v); \
6732 } while (0)
6733
6734 #define smp_load_acquire(p) \
6735diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6736index b4db69f..8f3b093 100644
6737--- a/arch/mips/include/asm/cache.h
6738+++ b/arch/mips/include/asm/cache.h
6739@@ -9,10 +9,11 @@
6740 #ifndef _ASM_CACHE_H
6741 #define _ASM_CACHE_H
6742
6743+#include <linux/const.h>
6744 #include <kmalloc.h>
6745
6746 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6747-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6748+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6749
6750 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6751 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6752diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6753index 1d38fe0..9beabc9 100644
6754--- a/arch/mips/include/asm/elf.h
6755+++ b/arch/mips/include/asm/elf.h
6756@@ -381,13 +381,16 @@ extern const char *__elf_platform;
6757 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6758 #endif
6759
6760+#ifdef CONFIG_PAX_ASLR
6761+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6762+
6763+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6764+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6765+#endif
6766+
6767 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6768 struct linux_binprm;
6769 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6770 int uses_interp);
6771
6772-struct mm_struct;
6773-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6774-#define arch_randomize_brk arch_randomize_brk
6775-
6776 #endif /* _ASM_ELF_H */
6777diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6778index c1f6afa..38cc6e9 100644
6779--- a/arch/mips/include/asm/exec.h
6780+++ b/arch/mips/include/asm/exec.h
6781@@ -12,6 +12,6 @@
6782 #ifndef _ASM_EXEC_H
6783 #define _ASM_EXEC_H
6784
6785-extern unsigned long arch_align_stack(unsigned long sp);
6786+#define arch_align_stack(x) ((x) & ~0xfUL)
6787
6788 #endif /* _ASM_EXEC_H */
6789diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6790index 9e8ef59..1139d6b 100644
6791--- a/arch/mips/include/asm/hw_irq.h
6792+++ b/arch/mips/include/asm/hw_irq.h
6793@@ -10,7 +10,7 @@
6794
6795 #include <linux/atomic.h>
6796
6797-extern atomic_t irq_err_count;
6798+extern atomic_unchecked_t irq_err_count;
6799
6800 /*
6801 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6802diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6803index 46dfc3c..a16b13a 100644
6804--- a/arch/mips/include/asm/local.h
6805+++ b/arch/mips/include/asm/local.h
6806@@ -12,15 +12,25 @@ typedef struct
6807 atomic_long_t a;
6808 } local_t;
6809
6810+typedef struct {
6811+ atomic_long_unchecked_t a;
6812+} local_unchecked_t;
6813+
6814 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6815
6816 #define local_read(l) atomic_long_read(&(l)->a)
6817+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6818 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6819+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6820
6821 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6822+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6823 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6824+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6825 #define local_inc(l) atomic_long_inc(&(l)->a)
6826+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6827 #define local_dec(l) atomic_long_dec(&(l)->a)
6828+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6829
6830 /*
6831 * Same as above, but return the result value
6832@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6833 return result;
6834 }
6835
6836+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6837+{
6838+ unsigned long result;
6839+
6840+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6841+ unsigned long temp;
6842+
6843+ __asm__ __volatile__(
6844+ " .set mips3 \n"
6845+ "1:" __LL "%1, %2 # local_add_return \n"
6846+ " addu %0, %1, %3 \n"
6847+ __SC "%0, %2 \n"
6848+ " beqzl %0, 1b \n"
6849+ " addu %0, %1, %3 \n"
6850+ " .set mips0 \n"
6851+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6852+ : "Ir" (i), "m" (l->a.counter)
6853+ : "memory");
6854+ } else if (kernel_uses_llsc) {
6855+ unsigned long temp;
6856+
6857+ __asm__ __volatile__(
6858+ " .set mips3 \n"
6859+ "1:" __LL "%1, %2 # local_add_return \n"
6860+ " addu %0, %1, %3 \n"
6861+ __SC "%0, %2 \n"
6862+ " beqz %0, 1b \n"
6863+ " addu %0, %1, %3 \n"
6864+ " .set mips0 \n"
6865+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6866+ : "Ir" (i), "m" (l->a.counter)
6867+ : "memory");
6868+ } else {
6869+ unsigned long flags;
6870+
6871+ local_irq_save(flags);
6872+ result = l->a.counter;
6873+ result += i;
6874+ l->a.counter = result;
6875+ local_irq_restore(flags);
6876+ }
6877+
6878+ return result;
6879+}
6880+
6881 static __inline__ long local_sub_return(long i, local_t * l)
6882 {
6883 unsigned long result;
6884@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6885
6886 #define local_cmpxchg(l, o, n) \
6887 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6888+#define local_cmpxchg_unchecked(l, o, n) \
6889+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6890 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6891
6892 /**
6893diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6894index 3be8180..c4798d5 100644
6895--- a/arch/mips/include/asm/page.h
6896+++ b/arch/mips/include/asm/page.h
6897@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6898 #ifdef CONFIG_CPU_MIPS32
6899 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6900 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6901- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6902+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6903 #else
6904 typedef struct { unsigned long long pte; } pte_t;
6905 #define pte_val(x) ((x).pte)
6906diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6907index b336037..5b874cc 100644
6908--- a/arch/mips/include/asm/pgalloc.h
6909+++ b/arch/mips/include/asm/pgalloc.h
6910@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6911 {
6912 set_pud(pud, __pud((unsigned long)pmd));
6913 }
6914+
6915+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6916+{
6917+ pud_populate(mm, pud, pmd);
6918+}
6919 #endif
6920
6921 /*
6922diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6923index df49a30..c0d3dd6 100644
6924--- a/arch/mips/include/asm/pgtable.h
6925+++ b/arch/mips/include/asm/pgtable.h
6926@@ -20,6 +20,9 @@
6927 #include <asm/io.h>
6928 #include <asm/pgtable-bits.h>
6929
6930+#define ktla_ktva(addr) (addr)
6931+#define ktva_ktla(addr) (addr)
6932+
6933 struct mm_struct;
6934 struct vm_area_struct;
6935
6936diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6937index 7de8658..c109224 100644
6938--- a/arch/mips/include/asm/thread_info.h
6939+++ b/arch/mips/include/asm/thread_info.h
6940@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6941 #define TIF_SECCOMP 4 /* secure computing */
6942 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6943 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6944+/* li takes a 32bit immediate */
6945+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6946+
6947 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6948 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6949 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6950@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
6951 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6952 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6953 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6954+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6955
6956 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6957 _TIF_SYSCALL_AUDIT | \
6958- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6959+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6960+ _TIF_GRSEC_SETXID)
6961
6962 /* work to do in syscall_trace_leave() */
6963 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6964- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6965+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6966
6967 /* work to do on interrupt/exception return */
6968 #define _TIF_WORK_MASK \
6969@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
6970 /* work to do on any return to u-space */
6971 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6972 _TIF_WORK_SYSCALL_EXIT | \
6973- _TIF_SYSCALL_TRACEPOINT)
6974+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6975
6976 /*
6977 * We stash processor id into a COP0 register to retrieve it fast
6978diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6979index a109510..94ee3f6 100644
6980--- a/arch/mips/include/asm/uaccess.h
6981+++ b/arch/mips/include/asm/uaccess.h
6982@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6983 __ok == 0; \
6984 })
6985
6986+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6987 #define access_ok(type, addr, size) \
6988 likely(__access_ok((addr), (size), __access_mask))
6989
6990diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6991index 1188e00..41cf144 100644
6992--- a/arch/mips/kernel/binfmt_elfn32.c
6993+++ b/arch/mips/kernel/binfmt_elfn32.c
6994@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6995 #undef ELF_ET_DYN_BASE
6996 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6997
6998+#ifdef CONFIG_PAX_ASLR
6999+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7000+
7001+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7002+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7003+#endif
7004+
7005 #include <asm/processor.h>
7006 #include <linux/module.h>
7007 #include <linux/elfcore.h>
7008diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7009index 9287678..f870e47 100644
7010--- a/arch/mips/kernel/binfmt_elfo32.c
7011+++ b/arch/mips/kernel/binfmt_elfo32.c
7012@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7013 #undef ELF_ET_DYN_BASE
7014 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7015
7016+#ifdef CONFIG_PAX_ASLR
7017+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7018+
7019+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7020+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7021+#endif
7022+
7023 #include <asm/processor.h>
7024
7025 #include <linux/module.h>
7026diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7027index 50b3648..c2f3cec 100644
7028--- a/arch/mips/kernel/i8259.c
7029+++ b/arch/mips/kernel/i8259.c
7030@@ -201,7 +201,7 @@ spurious_8259A_irq:
7031 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7032 spurious_irq_mask |= irqmask;
7033 }
7034- atomic_inc(&irq_err_count);
7035+ atomic_inc_unchecked(&irq_err_count);
7036 /*
7037 * Theoretically we do not have to handle this IRQ,
7038 * but in Linux this does not cause problems and is
7039diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7040index 44a1f79..2bd6aa3 100644
7041--- a/arch/mips/kernel/irq-gt641xx.c
7042+++ b/arch/mips/kernel/irq-gt641xx.c
7043@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7044 }
7045 }
7046
7047- atomic_inc(&irq_err_count);
7048+ atomic_inc_unchecked(&irq_err_count);
7049 }
7050
7051 void __init gt641xx_irq_init(void)
7052diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7053index d2bfbc2..a8eacd2 100644
7054--- a/arch/mips/kernel/irq.c
7055+++ b/arch/mips/kernel/irq.c
7056@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7057 printk("unexpected IRQ # %d\n", irq);
7058 }
7059
7060-atomic_t irq_err_count;
7061+atomic_unchecked_t irq_err_count;
7062
7063 int arch_show_interrupts(struct seq_file *p, int prec)
7064 {
7065- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7066+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7067 return 0;
7068 }
7069
7070 asmlinkage void spurious_interrupt(void)
7071 {
7072- atomic_inc(&irq_err_count);
7073+ atomic_inc_unchecked(&irq_err_count);
7074 }
7075
7076 void __init init_IRQ(void)
7077@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7078 #endif
7079 }
7080
7081+
7082 #ifdef DEBUG_STACKOVERFLOW
7083+extern void gr_handle_kernel_exploit(void);
7084+
7085 static inline void check_stack_overflow(void)
7086 {
7087 unsigned long sp;
7088@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7089 printk("do_IRQ: stack overflow: %ld\n",
7090 sp - sizeof(struct thread_info));
7091 dump_stack();
7092+ gr_handle_kernel_exploit();
7093 }
7094 }
7095 #else
7096diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7097index 0614717..002fa43 100644
7098--- a/arch/mips/kernel/pm-cps.c
7099+++ b/arch/mips/kernel/pm-cps.c
7100@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7101 nc_core_ready_count = nc_addr;
7102
7103 /* Ensure ready_count is zero-initialised before the assembly runs */
7104- ACCESS_ONCE(*nc_core_ready_count) = 0;
7105+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7106 coupled_barrier(&per_cpu(pm_barrier, core), online);
7107
7108 /* Run the generated entry code */
7109diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7110index 636b074..8fbb91f 100644
7111--- a/arch/mips/kernel/process.c
7112+++ b/arch/mips/kernel/process.c
7113@@ -520,15 +520,3 @@ unsigned long get_wchan(struct task_struct *task)
7114 out:
7115 return pc;
7116 }
7117-
7118-/*
7119- * Don't forget that the stack pointer must be aligned on a 8 bytes
7120- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7121- */
7122-unsigned long arch_align_stack(unsigned long sp)
7123-{
7124- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7125- sp -= get_random_int() & ~PAGE_MASK;
7126-
7127- return sp & ALMASK;
7128-}
7129diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7130index 645b3c4..909c75a 100644
7131--- a/arch/mips/kernel/ptrace.c
7132+++ b/arch/mips/kernel/ptrace.c
7133@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
7134 return ret;
7135 }
7136
7137+#ifdef CONFIG_GRKERNSEC_SETXID
7138+extern void gr_delayed_cred_worker(void);
7139+#endif
7140+
7141 /*
7142 * Notification of system call entry/exit
7143 * - triggered by current->work.syscall_trace
7144@@ -777,6 +781,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7145 tracehook_report_syscall_entry(regs))
7146 ret = -1;
7147
7148+#ifdef CONFIG_GRKERNSEC_SETXID
7149+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7150+ gr_delayed_cred_worker();
7151+#endif
7152+
7153 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7154 trace_sys_enter(regs, regs->regs[2]);
7155
7156diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7157index 07fc524..b9d7f28 100644
7158--- a/arch/mips/kernel/reset.c
7159+++ b/arch/mips/kernel/reset.c
7160@@ -13,6 +13,7 @@
7161 #include <linux/reboot.h>
7162
7163 #include <asm/reboot.h>
7164+#include <asm/bug.h>
7165
7166 /*
7167 * Urgs ... Too many MIPS machines to handle this in a generic way.
7168@@ -29,16 +30,19 @@ void machine_restart(char *command)
7169 {
7170 if (_machine_restart)
7171 _machine_restart(command);
7172+ BUG();
7173 }
7174
7175 void machine_halt(void)
7176 {
7177 if (_machine_halt)
7178 _machine_halt();
7179+ BUG();
7180 }
7181
7182 void machine_power_off(void)
7183 {
7184 if (pm_power_off)
7185 pm_power_off();
7186+ BUG();
7187 }
7188diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7189index 2242bdd..b284048 100644
7190--- a/arch/mips/kernel/sync-r4k.c
7191+++ b/arch/mips/kernel/sync-r4k.c
7192@@ -18,8 +18,8 @@
7193 #include <asm/mipsregs.h>
7194
7195 static atomic_t count_start_flag = ATOMIC_INIT(0);
7196-static atomic_t count_count_start = ATOMIC_INIT(0);
7197-static atomic_t count_count_stop = ATOMIC_INIT(0);
7198+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7199+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7200 static atomic_t count_reference = ATOMIC_INIT(0);
7201
7202 #define COUNTON 100
7203@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7204
7205 for (i = 0; i < NR_LOOPS; i++) {
7206 /* slaves loop on '!= 2' */
7207- while (atomic_read(&count_count_start) != 1)
7208+ while (atomic_read_unchecked(&count_count_start) != 1)
7209 mb();
7210- atomic_set(&count_count_stop, 0);
7211+ atomic_set_unchecked(&count_count_stop, 0);
7212 smp_wmb();
7213
7214 /* this lets the slaves write their count register */
7215- atomic_inc(&count_count_start);
7216+ atomic_inc_unchecked(&count_count_start);
7217
7218 /*
7219 * Everyone initialises count in the last loop:
7220@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7221 /*
7222 * Wait for all slaves to leave the synchronization point:
7223 */
7224- while (atomic_read(&count_count_stop) != 1)
7225+ while (atomic_read_unchecked(&count_count_stop) != 1)
7226 mb();
7227- atomic_set(&count_count_start, 0);
7228+ atomic_set_unchecked(&count_count_start, 0);
7229 smp_wmb();
7230- atomic_inc(&count_count_stop);
7231+ atomic_inc_unchecked(&count_count_stop);
7232 }
7233 /* Arrange for an interrupt in a short while */
7234 write_c0_compare(read_c0_count() + COUNTON);
7235@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7236 initcount = atomic_read(&count_reference);
7237
7238 for (i = 0; i < NR_LOOPS; i++) {
7239- atomic_inc(&count_count_start);
7240- while (atomic_read(&count_count_start) != 2)
7241+ atomic_inc_unchecked(&count_count_start);
7242+ while (atomic_read_unchecked(&count_count_start) != 2)
7243 mb();
7244
7245 /*
7246@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7247 if (i == NR_LOOPS-1)
7248 write_c0_count(initcount);
7249
7250- atomic_inc(&count_count_stop);
7251- while (atomic_read(&count_count_stop) != 2)
7252+ atomic_inc_unchecked(&count_count_stop);
7253+ while (atomic_read_unchecked(&count_count_stop) != 2)
7254 mb();
7255 }
7256 /* Arrange for an interrupt in a short while */
7257diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7258index 22b19c2..c5cc8c4 100644
7259--- a/arch/mips/kernel/traps.c
7260+++ b/arch/mips/kernel/traps.c
7261@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7262 siginfo_t info;
7263
7264 prev_state = exception_enter();
7265- die_if_kernel("Integer overflow", regs);
7266+ if (unlikely(!user_mode(regs))) {
7267+
7268+#ifdef CONFIG_PAX_REFCOUNT
7269+ if (fixup_exception(regs)) {
7270+ pax_report_refcount_overflow(regs);
7271+ exception_exit(prev_state);
7272+ return;
7273+ }
7274+#endif
7275+
7276+ die("Integer overflow", regs);
7277+ }
7278
7279 info.si_code = FPE_INTOVF;
7280 info.si_signo = SIGFPE;
7281diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7282index cd71141..e02c4df 100644
7283--- a/arch/mips/kvm/mips.c
7284+++ b/arch/mips/kvm/mips.c
7285@@ -839,7 +839,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7286 return r;
7287 }
7288
7289-int kvm_arch_init(void *opaque)
7290+int kvm_arch_init(const void *opaque)
7291 {
7292 if (kvm_mips_callbacks) {
7293 kvm_err("kvm: module already exists\n");
7294diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7295index becc42b..9e43d4b 100644
7296--- a/arch/mips/mm/fault.c
7297+++ b/arch/mips/mm/fault.c
7298@@ -28,6 +28,23 @@
7299 #include <asm/highmem.h> /* For VMALLOC_END */
7300 #include <linux/kdebug.h>
7301
7302+#ifdef CONFIG_PAX_PAGEEXEC
7303+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7304+{
7305+ unsigned long i;
7306+
7307+ printk(KERN_ERR "PAX: bytes at PC: ");
7308+ for (i = 0; i < 5; i++) {
7309+ unsigned int c;
7310+ if (get_user(c, (unsigned int *)pc+i))
7311+ printk(KERN_CONT "???????? ");
7312+ else
7313+ printk(KERN_CONT "%08x ", c);
7314+ }
7315+ printk("\n");
7316+}
7317+#endif
7318+
7319 /*
7320 * This routine handles page faults. It determines the address,
7321 * and the problem, and then passes it off to one of the appropriate
7322@@ -199,6 +216,14 @@ bad_area:
7323 bad_area_nosemaphore:
7324 /* User mode accesses just cause a SIGSEGV */
7325 if (user_mode(regs)) {
7326+
7327+#ifdef CONFIG_PAX_PAGEEXEC
7328+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7329+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7330+ do_group_exit(SIGKILL);
7331+ }
7332+#endif
7333+
7334 tsk->thread.cp0_badvaddr = address;
7335 tsk->thread.error_code = write;
7336 #if 0
7337diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7338index f1baadd..5472dca 100644
7339--- a/arch/mips/mm/mmap.c
7340+++ b/arch/mips/mm/mmap.c
7341@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7342 struct vm_area_struct *vma;
7343 unsigned long addr = addr0;
7344 int do_color_align;
7345+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7346 struct vm_unmapped_area_info info;
7347
7348 if (unlikely(len > TASK_SIZE))
7349@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7350 do_color_align = 1;
7351
7352 /* requesting a specific address */
7353+
7354+#ifdef CONFIG_PAX_RANDMMAP
7355+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7356+#endif
7357+
7358 if (addr) {
7359 if (do_color_align)
7360 addr = COLOUR_ALIGN(addr, pgoff);
7361@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7362 addr = PAGE_ALIGN(addr);
7363
7364 vma = find_vma(mm, addr);
7365- if (TASK_SIZE - len >= addr &&
7366- (!vma || addr + len <= vma->vm_start))
7367+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7368 return addr;
7369 }
7370
7371 info.length = len;
7372 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7373 info.align_offset = pgoff << PAGE_SHIFT;
7374+ info.threadstack_offset = offset;
7375
7376 if (dir == DOWN) {
7377 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7378@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7379 {
7380 unsigned long random_factor = 0UL;
7381
7382+#ifdef CONFIG_PAX_RANDMMAP
7383+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7384+#endif
7385+
7386 if (current->flags & PF_RANDOMIZE) {
7387 random_factor = get_random_int();
7388 random_factor = random_factor << PAGE_SHIFT;
7389@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7390
7391 if (mmap_is_legacy()) {
7392 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7393+
7394+#ifdef CONFIG_PAX_RANDMMAP
7395+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7396+ mm->mmap_base += mm->delta_mmap;
7397+#endif
7398+
7399 mm->get_unmapped_area = arch_get_unmapped_area;
7400 } else {
7401 mm->mmap_base = mmap_base(random_factor);
7402+
7403+#ifdef CONFIG_PAX_RANDMMAP
7404+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7405+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7406+#endif
7407+
7408 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7409 }
7410 }
7411
7412-static inline unsigned long brk_rnd(void)
7413-{
7414- unsigned long rnd = get_random_int();
7415-
7416- rnd = rnd << PAGE_SHIFT;
7417- /* 8MB for 32bit, 256MB for 64bit */
7418- if (TASK_IS_32BIT_ADDR)
7419- rnd = rnd & 0x7ffffful;
7420- else
7421- rnd = rnd & 0xffffffful;
7422-
7423- return rnd;
7424-}
7425-
7426-unsigned long arch_randomize_brk(struct mm_struct *mm)
7427-{
7428- unsigned long base = mm->brk;
7429- unsigned long ret;
7430-
7431- ret = PAGE_ALIGN(base + brk_rnd());
7432-
7433- if (ret < mm->brk)
7434- return mm->brk;
7435-
7436- return ret;
7437-}
7438-
7439 int __virt_addr_valid(const volatile void *kaddr)
7440 {
7441 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7442diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
7443index 9f7ecbd..6e370fc 100644
7444--- a/arch/mips/net/bpf_jit.c
7445+++ b/arch/mips/net/bpf_jit.c
7446@@ -1428,5 +1428,6 @@ void bpf_jit_free(struct bpf_prog *fp)
7447 {
7448 if (fp->jited)
7449 module_free(NULL, fp->bpf_func);
7450- kfree(fp);
7451+
7452+ bpf_prog_unlock_free(fp);
7453 }
7454diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7455index 59cccd9..f39ac2f 100644
7456--- a/arch/mips/pci/pci-octeon.c
7457+++ b/arch/mips/pci/pci-octeon.c
7458@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7459
7460
7461 static struct pci_ops octeon_pci_ops = {
7462- octeon_read_config,
7463- octeon_write_config,
7464+ .read = octeon_read_config,
7465+ .write = octeon_write_config,
7466 };
7467
7468 static struct resource octeon_pci_mem_resource = {
7469diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7470index 5e36c33..eb4a17b 100644
7471--- a/arch/mips/pci/pcie-octeon.c
7472+++ b/arch/mips/pci/pcie-octeon.c
7473@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7474 }
7475
7476 static struct pci_ops octeon_pcie0_ops = {
7477- octeon_pcie0_read_config,
7478- octeon_pcie0_write_config,
7479+ .read = octeon_pcie0_read_config,
7480+ .write = octeon_pcie0_write_config,
7481 };
7482
7483 static struct resource octeon_pcie0_mem_resource = {
7484@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7485 };
7486
7487 static struct pci_ops octeon_pcie1_ops = {
7488- octeon_pcie1_read_config,
7489- octeon_pcie1_write_config,
7490+ .read = octeon_pcie1_read_config,
7491+ .write = octeon_pcie1_write_config,
7492 };
7493
7494 static struct resource octeon_pcie1_mem_resource = {
7495@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7496 };
7497
7498 static struct pci_ops octeon_dummy_ops = {
7499- octeon_dummy_read_config,
7500- octeon_dummy_write_config,
7501+ .read = octeon_dummy_read_config,
7502+ .write = octeon_dummy_write_config,
7503 };
7504
7505 static struct resource octeon_dummy_mem_resource = {
7506diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7507index a2358b4..7cead4f 100644
7508--- a/arch/mips/sgi-ip27/ip27-nmi.c
7509+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7510@@ -187,9 +187,9 @@ void
7511 cont_nmi_dump(void)
7512 {
7513 #ifndef REAL_NMI_SIGNAL
7514- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7515+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7516
7517- atomic_inc(&nmied_cpus);
7518+ atomic_inc_unchecked(&nmied_cpus);
7519 #endif
7520 /*
7521 * Only allow 1 cpu to proceed
7522@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7523 udelay(10000);
7524 }
7525 #else
7526- while (atomic_read(&nmied_cpus) != num_online_cpus());
7527+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7528 #endif
7529
7530 /*
7531diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7532index a046b30..6799527 100644
7533--- a/arch/mips/sni/rm200.c
7534+++ b/arch/mips/sni/rm200.c
7535@@ -270,7 +270,7 @@ spurious_8259A_irq:
7536 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7537 spurious_irq_mask |= irqmask;
7538 }
7539- atomic_inc(&irq_err_count);
7540+ atomic_inc_unchecked(&irq_err_count);
7541 /*
7542 * Theoretically we do not have to handle this IRQ,
7543 * but in Linux this does not cause problems and is
7544diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7545index 41e873b..34d33a7 100644
7546--- a/arch/mips/vr41xx/common/icu.c
7547+++ b/arch/mips/vr41xx/common/icu.c
7548@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7549
7550 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7551
7552- atomic_inc(&irq_err_count);
7553+ atomic_inc_unchecked(&irq_err_count);
7554
7555 return -1;
7556 }
7557diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7558index ae0e4ee..e8f0692 100644
7559--- a/arch/mips/vr41xx/common/irq.c
7560+++ b/arch/mips/vr41xx/common/irq.c
7561@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7562 irq_cascade_t *cascade;
7563
7564 if (irq >= NR_IRQS) {
7565- atomic_inc(&irq_err_count);
7566+ atomic_inc_unchecked(&irq_err_count);
7567 return;
7568 }
7569
7570@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7571 ret = cascade->get_irq(irq);
7572 irq = ret;
7573 if (ret < 0)
7574- atomic_inc(&irq_err_count);
7575+ atomic_inc_unchecked(&irq_err_count);
7576 else
7577 irq_dispatch(irq);
7578 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7579diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7580index 967d144..db12197 100644
7581--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7582+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7583@@ -11,12 +11,14 @@
7584 #ifndef _ASM_PROC_CACHE_H
7585 #define _ASM_PROC_CACHE_H
7586
7587+#include <linux/const.h>
7588+
7589 /* L1 cache */
7590
7591 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7592 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7593-#define L1_CACHE_BYTES 16 /* bytes per entry */
7594 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7595+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7596 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7597
7598 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7599diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7600index bcb5df2..84fabd2 100644
7601--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7602+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7603@@ -16,13 +16,15 @@
7604 #ifndef _ASM_PROC_CACHE_H
7605 #define _ASM_PROC_CACHE_H
7606
7607+#include <linux/const.h>
7608+
7609 /*
7610 * L1 cache
7611 */
7612 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7613 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7614-#define L1_CACHE_BYTES 32 /* bytes per entry */
7615 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7616+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7617 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7618
7619 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7620diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7621index 4ce7a01..449202a 100644
7622--- a/arch/openrisc/include/asm/cache.h
7623+++ b/arch/openrisc/include/asm/cache.h
7624@@ -19,11 +19,13 @@
7625 #ifndef __ASM_OPENRISC_CACHE_H
7626 #define __ASM_OPENRISC_CACHE_H
7627
7628+#include <linux/const.h>
7629+
7630 /* FIXME: How can we replace these with values from the CPU...
7631 * they shouldn't be hard-coded!
7632 */
7633
7634-#define L1_CACHE_BYTES 16
7635 #define L1_CACHE_SHIFT 4
7636+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7637
7638 #endif /* __ASM_OPENRISC_CACHE_H */
7639diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7640index 0be2db2..1b0f26d 100644
7641--- a/arch/parisc/include/asm/atomic.h
7642+++ b/arch/parisc/include/asm/atomic.h
7643@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7644 return dec;
7645 }
7646
7647+#define atomic64_read_unchecked(v) atomic64_read(v)
7648+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7649+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7650+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7651+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7652+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7653+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7654+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7655+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7656+
7657 #endif /* !CONFIG_64BIT */
7658
7659
7660diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7661index 47f11c7..3420df2 100644
7662--- a/arch/parisc/include/asm/cache.h
7663+++ b/arch/parisc/include/asm/cache.h
7664@@ -5,6 +5,7 @@
7665 #ifndef __ARCH_PARISC_CACHE_H
7666 #define __ARCH_PARISC_CACHE_H
7667
7668+#include <linux/const.h>
7669
7670 /*
7671 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7672@@ -15,13 +16,13 @@
7673 * just ruin performance.
7674 */
7675 #ifdef CONFIG_PA20
7676-#define L1_CACHE_BYTES 64
7677 #define L1_CACHE_SHIFT 6
7678 #else
7679-#define L1_CACHE_BYTES 32
7680 #define L1_CACHE_SHIFT 5
7681 #endif
7682
7683+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7684+
7685 #ifndef __ASSEMBLY__
7686
7687 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7688diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7689index 3391d06..c23a2cc 100644
7690--- a/arch/parisc/include/asm/elf.h
7691+++ b/arch/parisc/include/asm/elf.h
7692@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7693
7694 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7695
7696+#ifdef CONFIG_PAX_ASLR
7697+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7698+
7699+#define PAX_DELTA_MMAP_LEN 16
7700+#define PAX_DELTA_STACK_LEN 16
7701+#endif
7702+
7703 /* This yields a mask that user programs can use to figure out what
7704 instruction set this CPU supports. This could be done in user space,
7705 but it's not easy, and we've already done it here. */
7706diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7707index f213f5b..0af3e8e 100644
7708--- a/arch/parisc/include/asm/pgalloc.h
7709+++ b/arch/parisc/include/asm/pgalloc.h
7710@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7711 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7712 }
7713
7714+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7715+{
7716+ pgd_populate(mm, pgd, pmd);
7717+}
7718+
7719 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7720 {
7721 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7722@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7723 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7724 #define pmd_free(mm, x) do { } while (0)
7725 #define pgd_populate(mm, pmd, pte) BUG()
7726+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7727
7728 #endif
7729
7730diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7731index 22b89d1..ce34230 100644
7732--- a/arch/parisc/include/asm/pgtable.h
7733+++ b/arch/parisc/include/asm/pgtable.h
7734@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7735 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7736 #define PAGE_COPY PAGE_EXECREAD
7737 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7738+
7739+#ifdef CONFIG_PAX_PAGEEXEC
7740+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7741+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7742+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7743+#else
7744+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7745+# define PAGE_COPY_NOEXEC PAGE_COPY
7746+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7747+#endif
7748+
7749 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7750 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7751 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7752diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7753index 4006964..fcb3cc2 100644
7754--- a/arch/parisc/include/asm/uaccess.h
7755+++ b/arch/parisc/include/asm/uaccess.h
7756@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7757 const void __user *from,
7758 unsigned long n)
7759 {
7760- int sz = __compiletime_object_size(to);
7761+ size_t sz = __compiletime_object_size(to);
7762 int ret = -EFAULT;
7763
7764- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7765+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7766 ret = __copy_from_user(to, from, n);
7767 else
7768 copy_from_user_overflow();
7769diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7770index 50dfafc..b9fc230 100644
7771--- a/arch/parisc/kernel/module.c
7772+++ b/arch/parisc/kernel/module.c
7773@@ -98,16 +98,38 @@
7774
7775 /* three functions to determine where in the module core
7776 * or init pieces the location is */
7777+static inline int in_init_rx(struct module *me, void *loc)
7778+{
7779+ return (loc >= me->module_init_rx &&
7780+ loc < (me->module_init_rx + me->init_size_rx));
7781+}
7782+
7783+static inline int in_init_rw(struct module *me, void *loc)
7784+{
7785+ return (loc >= me->module_init_rw &&
7786+ loc < (me->module_init_rw + me->init_size_rw));
7787+}
7788+
7789 static inline int in_init(struct module *me, void *loc)
7790 {
7791- return (loc >= me->module_init &&
7792- loc <= (me->module_init + me->init_size));
7793+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7794+}
7795+
7796+static inline int in_core_rx(struct module *me, void *loc)
7797+{
7798+ return (loc >= me->module_core_rx &&
7799+ loc < (me->module_core_rx + me->core_size_rx));
7800+}
7801+
7802+static inline int in_core_rw(struct module *me, void *loc)
7803+{
7804+ return (loc >= me->module_core_rw &&
7805+ loc < (me->module_core_rw + me->core_size_rw));
7806 }
7807
7808 static inline int in_core(struct module *me, void *loc)
7809 {
7810- return (loc >= me->module_core &&
7811- loc <= (me->module_core + me->core_size));
7812+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7813 }
7814
7815 static inline int in_local(struct module *me, void *loc)
7816@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7817 }
7818
7819 /* align things a bit */
7820- me->core_size = ALIGN(me->core_size, 16);
7821- me->arch.got_offset = me->core_size;
7822- me->core_size += gots * sizeof(struct got_entry);
7823+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7824+ me->arch.got_offset = me->core_size_rw;
7825+ me->core_size_rw += gots * sizeof(struct got_entry);
7826
7827- me->core_size = ALIGN(me->core_size, 16);
7828- me->arch.fdesc_offset = me->core_size;
7829- me->core_size += fdescs * sizeof(Elf_Fdesc);
7830+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7831+ me->arch.fdesc_offset = me->core_size_rw;
7832+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7833
7834 me->arch.got_max = gots;
7835 me->arch.fdesc_max = fdescs;
7836@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7837
7838 BUG_ON(value == 0);
7839
7840- got = me->module_core + me->arch.got_offset;
7841+ got = me->module_core_rw + me->arch.got_offset;
7842 for (i = 0; got[i].addr; i++)
7843 if (got[i].addr == value)
7844 goto out;
7845@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7846 #ifdef CONFIG_64BIT
7847 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7848 {
7849- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7850+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7851
7852 if (!value) {
7853 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7854@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7855
7856 /* Create new one */
7857 fdesc->addr = value;
7858- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7859+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7860 return (Elf_Addr)fdesc;
7861 }
7862 #endif /* CONFIG_64BIT */
7863@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7864
7865 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7866 end = table + sechdrs[me->arch.unwind_section].sh_size;
7867- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7868+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7869
7870 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7871 me->arch.unwind_section, table, end, gp);
7872diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7873index e1ffea2..46ed66e 100644
7874--- a/arch/parisc/kernel/sys_parisc.c
7875+++ b/arch/parisc/kernel/sys_parisc.c
7876@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7877 unsigned long task_size = TASK_SIZE;
7878 int do_color_align, last_mmap;
7879 struct vm_unmapped_area_info info;
7880+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7881
7882 if (len > task_size)
7883 return -ENOMEM;
7884@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7885 goto found_addr;
7886 }
7887
7888+#ifdef CONFIG_PAX_RANDMMAP
7889+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7890+#endif
7891+
7892 if (addr) {
7893 if (do_color_align && last_mmap)
7894 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7895@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7896 info.high_limit = mmap_upper_limit();
7897 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7898 info.align_offset = shared_align_offset(last_mmap, pgoff);
7899+ info.threadstack_offset = offset;
7900 addr = vm_unmapped_area(&info);
7901
7902 found_addr:
7903@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7904 unsigned long addr = addr0;
7905 int do_color_align, last_mmap;
7906 struct vm_unmapped_area_info info;
7907+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7908
7909 #ifdef CONFIG_64BIT
7910 /* This should only ever run for 32-bit processes. */
7911@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7912 }
7913
7914 /* requesting a specific address */
7915+#ifdef CONFIG_PAX_RANDMMAP
7916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7917+#endif
7918+
7919 if (addr) {
7920 if (do_color_align && last_mmap)
7921 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7922@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7923 info.high_limit = mm->mmap_base;
7924 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7925 info.align_offset = shared_align_offset(last_mmap, pgoff);
7926+ info.threadstack_offset = offset;
7927 addr = vm_unmapped_area(&info);
7928 if (!(addr & ~PAGE_MASK))
7929 goto found_addr;
7930@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7931 mm->mmap_legacy_base = mmap_legacy_base();
7932 mm->mmap_base = mmap_upper_limit();
7933
7934+#ifdef CONFIG_PAX_RANDMMAP
7935+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7936+ mm->mmap_legacy_base += mm->delta_mmap;
7937+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7938+ }
7939+#endif
7940+
7941 if (mmap_is_legacy()) {
7942 mm->mmap_base = mm->mmap_legacy_base;
7943 mm->get_unmapped_area = arch_get_unmapped_area;
7944diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7945index 47ee620..1107387 100644
7946--- a/arch/parisc/kernel/traps.c
7947+++ b/arch/parisc/kernel/traps.c
7948@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7949
7950 down_read(&current->mm->mmap_sem);
7951 vma = find_vma(current->mm,regs->iaoq[0]);
7952- if (vma && (regs->iaoq[0] >= vma->vm_start)
7953- && (vma->vm_flags & VM_EXEC)) {
7954-
7955+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7956 fault_address = regs->iaoq[0];
7957 fault_space = regs->iasq[0];
7958
7959diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7960index 3ca9c11..d163ef7 100644
7961--- a/arch/parisc/mm/fault.c
7962+++ b/arch/parisc/mm/fault.c
7963@@ -15,6 +15,7 @@
7964 #include <linux/sched.h>
7965 #include <linux/interrupt.h>
7966 #include <linux/module.h>
7967+#include <linux/unistd.h>
7968
7969 #include <asm/uaccess.h>
7970 #include <asm/traps.h>
7971@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7972 static unsigned long
7973 parisc_acctyp(unsigned long code, unsigned int inst)
7974 {
7975- if (code == 6 || code == 16)
7976+ if (code == 6 || code == 7 || code == 16)
7977 return VM_EXEC;
7978
7979 switch (inst & 0xf0000000) {
7980@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7981 }
7982 #endif
7983
7984+#ifdef CONFIG_PAX_PAGEEXEC
7985+/*
7986+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7987+ *
7988+ * returns 1 when task should be killed
7989+ * 2 when rt_sigreturn trampoline was detected
7990+ * 3 when unpatched PLT trampoline was detected
7991+ */
7992+static int pax_handle_fetch_fault(struct pt_regs *regs)
7993+{
7994+
7995+#ifdef CONFIG_PAX_EMUPLT
7996+ int err;
7997+
7998+ do { /* PaX: unpatched PLT emulation */
7999+ unsigned int bl, depwi;
8000+
8001+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8002+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8003+
8004+ if (err)
8005+ break;
8006+
8007+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8008+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8009+
8010+ err = get_user(ldw, (unsigned int *)addr);
8011+ err |= get_user(bv, (unsigned int *)(addr+4));
8012+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8013+
8014+ if (err)
8015+ break;
8016+
8017+ if (ldw == 0x0E801096U &&
8018+ bv == 0xEAC0C000U &&
8019+ ldw2 == 0x0E881095U)
8020+ {
8021+ unsigned int resolver, map;
8022+
8023+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8024+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8025+ if (err)
8026+ break;
8027+
8028+ regs->gr[20] = instruction_pointer(regs)+8;
8029+ regs->gr[21] = map;
8030+ regs->gr[22] = resolver;
8031+ regs->iaoq[0] = resolver | 3UL;
8032+ regs->iaoq[1] = regs->iaoq[0] + 4;
8033+ return 3;
8034+ }
8035+ }
8036+ } while (0);
8037+#endif
8038+
8039+#ifdef CONFIG_PAX_EMUTRAMP
8040+
8041+#ifndef CONFIG_PAX_EMUSIGRT
8042+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8043+ return 1;
8044+#endif
8045+
8046+ do { /* PaX: rt_sigreturn emulation */
8047+ unsigned int ldi1, ldi2, bel, nop;
8048+
8049+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8050+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8051+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8052+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8053+
8054+ if (err)
8055+ break;
8056+
8057+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8058+ ldi2 == 0x3414015AU &&
8059+ bel == 0xE4008200U &&
8060+ nop == 0x08000240U)
8061+ {
8062+ regs->gr[25] = (ldi1 & 2) >> 1;
8063+ regs->gr[20] = __NR_rt_sigreturn;
8064+ regs->gr[31] = regs->iaoq[1] + 16;
8065+ regs->sr[0] = regs->iasq[1];
8066+ regs->iaoq[0] = 0x100UL;
8067+ regs->iaoq[1] = regs->iaoq[0] + 4;
8068+ regs->iasq[0] = regs->sr[2];
8069+ regs->iasq[1] = regs->sr[2];
8070+ return 2;
8071+ }
8072+ } while (0);
8073+#endif
8074+
8075+ return 1;
8076+}
8077+
8078+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8079+{
8080+ unsigned long i;
8081+
8082+ printk(KERN_ERR "PAX: bytes at PC: ");
8083+ for (i = 0; i < 5; i++) {
8084+ unsigned int c;
8085+ if (get_user(c, (unsigned int *)pc+i))
8086+ printk(KERN_CONT "???????? ");
8087+ else
8088+ printk(KERN_CONT "%08x ", c);
8089+ }
8090+ printk("\n");
8091+}
8092+#endif
8093+
8094 int fixup_exception(struct pt_regs *regs)
8095 {
8096 const struct exception_table_entry *fix;
8097@@ -234,8 +345,33 @@ retry:
8098
8099 good_area:
8100
8101- if ((vma->vm_flags & acc_type) != acc_type)
8102+ if ((vma->vm_flags & acc_type) != acc_type) {
8103+
8104+#ifdef CONFIG_PAX_PAGEEXEC
8105+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8106+ (address & ~3UL) == instruction_pointer(regs))
8107+ {
8108+ up_read(&mm->mmap_sem);
8109+ switch (pax_handle_fetch_fault(regs)) {
8110+
8111+#ifdef CONFIG_PAX_EMUPLT
8112+ case 3:
8113+ return;
8114+#endif
8115+
8116+#ifdef CONFIG_PAX_EMUTRAMP
8117+ case 2:
8118+ return;
8119+#endif
8120+
8121+ }
8122+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8123+ do_group_exit(SIGKILL);
8124+ }
8125+#endif
8126+
8127 goto bad_area;
8128+ }
8129
8130 /*
8131 * If for any reason at all we couldn't handle the fault, make
8132diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8133index 4bc7b62..107e0b2 100644
8134--- a/arch/powerpc/Kconfig
8135+++ b/arch/powerpc/Kconfig
8136@@ -399,6 +399,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8137 config KEXEC
8138 bool "kexec system call"
8139 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8140+ depends on !GRKERNSEC_KMEM
8141 help
8142 kexec is a system call that implements the ability to shutdown your
8143 current kernel, and to start another kernel. It is like a reboot
8144diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8145index 28992d0..434c881 100644
8146--- a/arch/powerpc/include/asm/atomic.h
8147+++ b/arch/powerpc/include/asm/atomic.h
8148@@ -12,6 +12,11 @@
8149
8150 #define ATOMIC_INIT(i) { (i) }
8151
8152+#define _ASM_EXTABLE(from, to) \
8153+" .section __ex_table,\"a\"\n" \
8154+ PPC_LONG" " #from ", " #to"\n" \
8155+" .previous\n"
8156+
8157 static __inline__ int atomic_read(const atomic_t *v)
8158 {
8159 int t;
8160@@ -21,16 +26,61 @@ static __inline__ int atomic_read(const atomic_t *v)
8161 return t;
8162 }
8163
8164+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
8165+{
8166+ int t;
8167+
8168+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8169+
8170+ return t;
8171+}
8172+
8173 static __inline__ void atomic_set(atomic_t *v, int i)
8174 {
8175 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8176 }
8177
8178+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8179+{
8180+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8181+}
8182+
8183 static __inline__ void atomic_add(int a, atomic_t *v)
8184 {
8185 int t;
8186
8187 __asm__ __volatile__(
8188+"1: lwarx %0,0,%3 # atomic_add\n"
8189+
8190+#ifdef CONFIG_PAX_REFCOUNT
8191+" mcrxr cr0\n"
8192+" addo. %0,%2,%0\n"
8193+" bf 4*cr0+so, 3f\n"
8194+"2:.long " "0x00c00b00""\n"
8195+#else
8196+" add %0,%2,%0\n"
8197+#endif
8198+
8199+"3:\n"
8200+ PPC405_ERR77(0,%3)
8201+" stwcx. %0,0,%3 \n\
8202+ bne- 1b"
8203+
8204+#ifdef CONFIG_PAX_REFCOUNT
8205+"\n4:\n"
8206+ _ASM_EXTABLE(2b, 4b)
8207+#endif
8208+
8209+ : "=&r" (t), "+m" (v->counter)
8210+ : "r" (a), "r" (&v->counter)
8211+ : "cc");
8212+}
8213+
8214+static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
8215+{
8216+ int t;
8217+
8218+ __asm__ __volatile__(
8219 "1: lwarx %0,0,%3 # atomic_add\n\
8220 add %0,%2,%0\n"
8221 PPC405_ERR77(0,%3)
8222@@ -41,12 +91,49 @@ static __inline__ void atomic_add(int a, atomic_t *v)
8223 : "cc");
8224 }
8225
8226+/* Same as atomic_add but return the value */
8227 static __inline__ int atomic_add_return(int a, atomic_t *v)
8228 {
8229 int t;
8230
8231 __asm__ __volatile__(
8232 PPC_ATOMIC_ENTRY_BARRIER
8233+"1: lwarx %0,0,%2 # atomic_add_return\n"
8234+
8235+#ifdef CONFIG_PAX_REFCOUNT
8236+" mcrxr cr0\n"
8237+" addo. %0,%1,%0\n"
8238+" bf 4*cr0+so, 3f\n"
8239+"2:.long " "0x00c00b00""\n"
8240+#else
8241+" add %0,%1,%0\n"
8242+#endif
8243+
8244+"3:\n"
8245+ PPC405_ERR77(0,%2)
8246+" stwcx. %0,0,%2 \n\
8247+ bne- 1b\n"
8248+"4:"
8249+
8250+#ifdef CONFIG_PAX_REFCOUNT
8251+ _ASM_EXTABLE(2b, 4b)
8252+#endif
8253+
8254+ PPC_ATOMIC_EXIT_BARRIER
8255+ : "=&r" (t)
8256+ : "r" (a), "r" (&v->counter)
8257+ : "cc", "memory");
8258+
8259+ return t;
8260+}
8261+
8262+/* Same as atomic_add_unchecked but return the value */
8263+static __inline__ int atomic_add_return_unchecked(int a, atomic_unchecked_t *v)
8264+{
8265+ int t;
8266+
8267+ __asm__ __volatile__(
8268+ PPC_ATOMIC_ENTRY_BARRIER
8269 "1: lwarx %0,0,%2 # atomic_add_return\n\
8270 add %0,%1,%0\n"
8271 PPC405_ERR77(0,%2)
8272@@ -67,6 +154,37 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8273 int t;
8274
8275 __asm__ __volatile__(
8276+"1: lwarx %0,0,%3 # atomic_sub\n"
8277+
8278+#ifdef CONFIG_PAX_REFCOUNT
8279+" mcrxr cr0\n"
8280+" subfo. %0,%2,%0\n"
8281+" bf 4*cr0+so, 3f\n"
8282+"2:.long " "0x00c00b00""\n"
8283+#else
8284+" subf %0,%2,%0\n"
8285+#endif
8286+
8287+"3:\n"
8288+ PPC405_ERR77(0,%3)
8289+" stwcx. %0,0,%3 \n\
8290+ bne- 1b\n"
8291+"4:"
8292+
8293+#ifdef CONFIG_PAX_REFCOUNT
8294+ _ASM_EXTABLE(2b, 4b)
8295+#endif
8296+
8297+ : "=&r" (t), "+m" (v->counter)
8298+ : "r" (a), "r" (&v->counter)
8299+ : "cc");
8300+}
8301+
8302+static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
8303+{
8304+ int t;
8305+
8306+ __asm__ __volatile__(
8307 "1: lwarx %0,0,%3 # atomic_sub\n\
8308 subf %0,%2,%0\n"
8309 PPC405_ERR77(0,%3)
8310@@ -77,12 +195,49 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8311 : "cc");
8312 }
8313
8314+/* Same as atomic_sub but return the value */
8315 static __inline__ int atomic_sub_return(int a, atomic_t *v)
8316 {
8317 int t;
8318
8319 __asm__ __volatile__(
8320 PPC_ATOMIC_ENTRY_BARRIER
8321+"1: lwarx %0,0,%2 # atomic_sub_return\n"
8322+
8323+#ifdef CONFIG_PAX_REFCOUNT
8324+" mcrxr cr0\n"
8325+" subfo. %0,%1,%0\n"
8326+" bf 4*cr0+so, 3f\n"
8327+"2:.long " "0x00c00b00""\n"
8328+#else
8329+" subf %0,%1,%0\n"
8330+#endif
8331+
8332+"3:\n"
8333+ PPC405_ERR77(0,%2)
8334+" stwcx. %0,0,%2 \n\
8335+ bne- 1b\n"
8336+ PPC_ATOMIC_EXIT_BARRIER
8337+"4:"
8338+
8339+#ifdef CONFIG_PAX_REFCOUNT
8340+ _ASM_EXTABLE(2b, 4b)
8341+#endif
8342+
8343+ : "=&r" (t)
8344+ : "r" (a), "r" (&v->counter)
8345+ : "cc", "memory");
8346+
8347+ return t;
8348+}
8349+
8350+/* Same as atomic_sub_unchecked but return the value */
8351+static __inline__ int atomic_sub_return_unchecked(int a, atomic_unchecked_t *v)
8352+{
8353+ int t;
8354+
8355+ __asm__ __volatile__(
8356+ PPC_ATOMIC_ENTRY_BARRIER
8357 "1: lwarx %0,0,%2 # atomic_sub_return\n\
8358 subf %0,%1,%0\n"
8359 PPC405_ERR77(0,%2)
8360@@ -96,38 +251,23 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
8361 return t;
8362 }
8363
8364-static __inline__ void atomic_inc(atomic_t *v)
8365-{
8366- int t;
8367+/*
8368+ * atomic_inc - increment atomic variable
8369+ * @v: pointer of type atomic_t
8370+ *
8371+ * Automatically increments @v by 1
8372+ */
8373+#define atomic_inc(v) atomic_add(1, (v))
8374+#define atomic_inc_return(v) atomic_add_return(1, (v))
8375
8376- __asm__ __volatile__(
8377-"1: lwarx %0,0,%2 # atomic_inc\n\
8378- addic %0,%0,1\n"
8379- PPC405_ERR77(0,%2)
8380-" stwcx. %0,0,%2 \n\
8381- bne- 1b"
8382- : "=&r" (t), "+m" (v->counter)
8383- : "r" (&v->counter)
8384- : "cc", "xer");
8385+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
8386+{
8387+ atomic_add_unchecked(1, v);
8388 }
8389
8390-static __inline__ int atomic_inc_return(atomic_t *v)
8391+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8392 {
8393- int t;
8394-
8395- __asm__ __volatile__(
8396- PPC_ATOMIC_ENTRY_BARRIER
8397-"1: lwarx %0,0,%1 # atomic_inc_return\n\
8398- addic %0,%0,1\n"
8399- PPC405_ERR77(0,%1)
8400-" stwcx. %0,0,%1 \n\
8401- bne- 1b"
8402- PPC_ATOMIC_EXIT_BARRIER
8403- : "=&r" (t)
8404- : "r" (&v->counter)
8405- : "cc", "xer", "memory");
8406-
8407- return t;
8408+ return atomic_add_return_unchecked(1, v);
8409 }
8410
8411 /*
8412@@ -140,43 +280,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
8413 */
8414 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8415
8416-static __inline__ void atomic_dec(atomic_t *v)
8417+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8418 {
8419- int t;
8420-
8421- __asm__ __volatile__(
8422-"1: lwarx %0,0,%2 # atomic_dec\n\
8423- addic %0,%0,-1\n"
8424- PPC405_ERR77(0,%2)\
8425-" stwcx. %0,0,%2\n\
8426- bne- 1b"
8427- : "=&r" (t), "+m" (v->counter)
8428- : "r" (&v->counter)
8429- : "cc", "xer");
8430+ return atomic_add_return_unchecked(1, v) == 0;
8431 }
8432
8433-static __inline__ int atomic_dec_return(atomic_t *v)
8434+/*
8435+ * atomic_dec - decrement atomic variable
8436+ * @v: pointer of type atomic_t
8437+ *
8438+ * Atomically decrements @v by 1
8439+ */
8440+#define atomic_dec(v) atomic_sub(1, (v))
8441+#define atomic_dec_return(v) atomic_sub_return(1, (v))
8442+
8443+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
8444 {
8445- int t;
8446-
8447- __asm__ __volatile__(
8448- PPC_ATOMIC_ENTRY_BARRIER
8449-"1: lwarx %0,0,%1 # atomic_dec_return\n\
8450- addic %0,%0,-1\n"
8451- PPC405_ERR77(0,%1)
8452-" stwcx. %0,0,%1\n\
8453- bne- 1b"
8454- PPC_ATOMIC_EXIT_BARRIER
8455- : "=&r" (t)
8456- : "r" (&v->counter)
8457- : "cc", "xer", "memory");
8458-
8459- return t;
8460+ atomic_sub_unchecked(1, v);
8461 }
8462
8463 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8464 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8465
8466+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8467+{
8468+ return cmpxchg(&(v->counter), old, new);
8469+}
8470+
8471+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8472+{
8473+ return xchg(&(v->counter), new);
8474+}
8475+
8476 /**
8477 * __atomic_add_unless - add unless the number is a given value
8478 * @v: pointer of type atomic_t
8479@@ -271,6 +406,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8480 }
8481 #define atomic_dec_if_positive atomic_dec_if_positive
8482
8483+#define smp_mb__before_atomic_dec() smp_mb()
8484+#define smp_mb__after_atomic_dec() smp_mb()
8485+#define smp_mb__before_atomic_inc() smp_mb()
8486+#define smp_mb__after_atomic_inc() smp_mb()
8487+
8488 #ifdef __powerpc64__
8489
8490 #define ATOMIC64_INIT(i) { (i) }
8491@@ -284,11 +424,25 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8492 return t;
8493 }
8494
8495+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8496+{
8497+ long t;
8498+
8499+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8500+
8501+ return t;
8502+}
8503+
8504 static __inline__ void atomic64_set(atomic64_t *v, long i)
8505 {
8506 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8507 }
8508
8509+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8510+{
8511+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8512+}
8513+
8514 static __inline__ void atomic64_add(long a, atomic64_t *v)
8515 {
8516 long t;
8517@@ -303,12 +457,76 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
8518 : "cc");
8519 }
8520
8521+static __inline__ void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
8522+{
8523+ long t;
8524+
8525+ __asm__ __volatile__(
8526+"1: ldarx %0,0,%3 # atomic64_add\n"
8527+
8528+#ifdef CONFIG_PAX_REFCOUNT
8529+" mcrxr cr0\n"
8530+" addo. %0,%2,%0\n"
8531+" bf 4*cr0+so, 3f\n"
8532+"2:.long " "0x00c00b00""\n"
8533+#else
8534+" add %0,%2,%0\n"
8535+#endif
8536+
8537+"3:\n"
8538+" stdcx. %0,0,%3 \n\
8539+ bne- 1b\n"
8540+"4:"
8541+
8542+#ifdef CONFIG_PAX_REFCOUNT
8543+ _ASM_EXTABLE(2b, 4b)
8544+#endif
8545+
8546+ : "=&r" (t), "+m" (v->counter)
8547+ : "r" (a), "r" (&v->counter)
8548+ : "cc");
8549+}
8550+
8551 static __inline__ long atomic64_add_return(long a, atomic64_t *v)
8552 {
8553 long t;
8554
8555 __asm__ __volatile__(
8556 PPC_ATOMIC_ENTRY_BARRIER
8557+"1: ldarx %0,0,%2 # atomic64_add_return\n"
8558+
8559+#ifdef CONFIG_PAX_REFCOUNT
8560+" mcrxr cr0\n"
8561+" addo. %0,%1,%0\n"
8562+" bf 4*cr0+so, 3f\n"
8563+"2:.long " "0x00c00b00""\n"
8564+#else
8565+" add %0,%1,%0\n"
8566+#endif
8567+
8568+"3:\n"
8569+" stdcx. %0,0,%2 \n\
8570+ bne- 1b\n"
8571+ PPC_ATOMIC_EXIT_BARRIER
8572+"4:"
8573+
8574+#ifdef CONFIG_PAX_REFCOUNT
8575+ _ASM_EXTABLE(2b, 4b)
8576+#endif
8577+
8578+ : "=&r" (t)
8579+ : "r" (a), "r" (&v->counter)
8580+ : "cc", "memory");
8581+
8582+ return t;
8583+}
8584+
8585+static __inline__ long atomic64_add_return_unchecked(long a, atomic64_unchecked_t *v)
8586+{
8587+ long t;
8588+
8589+ __asm__ __volatile__(
8590+ PPC_ATOMIC_ENTRY_BARRIER
8591 "1: ldarx %0,0,%2 # atomic64_add_return\n\
8592 add %0,%1,%0\n\
8593 stdcx. %0,0,%2 \n\
8594@@ -328,6 +546,36 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
8595 long t;
8596
8597 __asm__ __volatile__(
8598+"1: ldarx %0,0,%3 # atomic64_sub\n"
8599+
8600+#ifdef CONFIG_PAX_REFCOUNT
8601+" mcrxr cr0\n"
8602+" subfo. %0,%2,%0\n"
8603+" bf 4*cr0+so, 3f\n"
8604+"2:.long " "0x00c00b00""\n"
8605+#else
8606+" subf %0,%2,%0\n"
8607+#endif
8608+
8609+"3:\n"
8610+" stdcx. %0,0,%3 \n\
8611+ bne- 1b"
8612+"4:"
8613+
8614+#ifdef CONFIG_PAX_REFCOUNT
8615+ _ASM_EXTABLE(2b, 4b)
8616+#endif
8617+
8618+ : "=&r" (t), "+m" (v->counter)
8619+ : "r" (a), "r" (&v->counter)
8620+ : "cc");
8621+}
8622+
8623+static __inline__ void atomic64_sub_unchecked(long a, atomic64_unchecked_t *v)
8624+{
8625+ long t;
8626+
8627+ __asm__ __volatile__(
8628 "1: ldarx %0,0,%3 # atomic64_sub\n\
8629 subf %0,%2,%0\n\
8630 stdcx. %0,0,%3 \n\
8631@@ -343,6 +591,40 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8632
8633 __asm__ __volatile__(
8634 PPC_ATOMIC_ENTRY_BARRIER
8635+"1: ldarx %0,0,%2 # atomic64_sub_return\n"
8636+
8637+#ifdef CONFIG_PAX_REFCOUNT
8638+" mcrxr cr0\n"
8639+" subfo. %0,%1,%0\n"
8640+" bf 4*cr0+so, 3f\n"
8641+"2:.long " "0x00c00b00""\n"
8642+#else
8643+" subf %0,%1,%0\n"
8644+#endif
8645+
8646+"3:\n"
8647+" stdcx. %0,0,%2 \n\
8648+ bne- 1b\n"
8649+ PPC_ATOMIC_EXIT_BARRIER
8650+"4:"
8651+
8652+#ifdef CONFIG_PAX_REFCOUNT
8653+ _ASM_EXTABLE(2b, 4b)
8654+#endif
8655+
8656+ : "=&r" (t)
8657+ : "r" (a), "r" (&v->counter)
8658+ : "cc", "memory");
8659+
8660+ return t;
8661+}
8662+
8663+static __inline__ long atomic64_sub_return_unchecked(long a, atomic64_unchecked_t *v)
8664+{
8665+ long t;
8666+
8667+ __asm__ __volatile__(
8668+ PPC_ATOMIC_ENTRY_BARRIER
8669 "1: ldarx %0,0,%2 # atomic64_sub_return\n\
8670 subf %0,%1,%0\n\
8671 stdcx. %0,0,%2 \n\
8672@@ -355,36 +637,23 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8673 return t;
8674 }
8675
8676-static __inline__ void atomic64_inc(atomic64_t *v)
8677-{
8678- long t;
8679+/*
8680+ * atomic64_inc - increment atomic variable
8681+ * @v: pointer of type atomic64_t
8682+ *
8683+ * Automatically increments @v by 1
8684+ */
8685+#define atomic64_inc(v) atomic64_add(1, (v))
8686+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8687
8688- __asm__ __volatile__(
8689-"1: ldarx %0,0,%2 # atomic64_inc\n\
8690- addic %0,%0,1\n\
8691- stdcx. %0,0,%2 \n\
8692- bne- 1b"
8693- : "=&r" (t), "+m" (v->counter)
8694- : "r" (&v->counter)
8695- : "cc", "xer");
8696+static __inline__ void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8697+{
8698+ atomic64_add_unchecked(1, v);
8699 }
8700
8701-static __inline__ long atomic64_inc_return(atomic64_t *v)
8702+static __inline__ int atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8703 {
8704- long t;
8705-
8706- __asm__ __volatile__(
8707- PPC_ATOMIC_ENTRY_BARRIER
8708-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8709- addic %0,%0,1\n\
8710- stdcx. %0,0,%1 \n\
8711- bne- 1b"
8712- PPC_ATOMIC_EXIT_BARRIER
8713- : "=&r" (t)
8714- : "r" (&v->counter)
8715- : "cc", "xer", "memory");
8716-
8717- return t;
8718+ return atomic64_add_return_unchecked(1, v);
8719 }
8720
8721 /*
8722@@ -397,36 +666,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8723 */
8724 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8725
8726-static __inline__ void atomic64_dec(atomic64_t *v)
8727+/*
8728+ * atomic64_dec - decrement atomic variable
8729+ * @v: pointer of type atomic64_t
8730+ *
8731+ * Atomically decrements @v by 1
8732+ */
8733+#define atomic64_dec(v) atomic64_sub(1, (v))
8734+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8735+
8736+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8737 {
8738- long t;
8739-
8740- __asm__ __volatile__(
8741-"1: ldarx %0,0,%2 # atomic64_dec\n\
8742- addic %0,%0,-1\n\
8743- stdcx. %0,0,%2\n\
8744- bne- 1b"
8745- : "=&r" (t), "+m" (v->counter)
8746- : "r" (&v->counter)
8747- : "cc", "xer");
8748-}
8749-
8750-static __inline__ long atomic64_dec_return(atomic64_t *v)
8751-{
8752- long t;
8753-
8754- __asm__ __volatile__(
8755- PPC_ATOMIC_ENTRY_BARRIER
8756-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8757- addic %0,%0,-1\n\
8758- stdcx. %0,0,%1\n\
8759- bne- 1b"
8760- PPC_ATOMIC_EXIT_BARRIER
8761- : "=&r" (t)
8762- : "r" (&v->counter)
8763- : "cc", "xer", "memory");
8764-
8765- return t;
8766+ atomic64_sub_unchecked(1, v);
8767 }
8768
8769 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8770@@ -459,6 +710,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8771 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8772 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8773
8774+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8775+{
8776+ return cmpxchg(&(v->counter), old, new);
8777+}
8778+
8779+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8780+{
8781+ return xchg(&(v->counter), new);
8782+}
8783+
8784 /**
8785 * atomic64_add_unless - add unless the number is a given value
8786 * @v: pointer of type atomic64_t
8787diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8788index bab79a1..4a3eabc 100644
8789--- a/arch/powerpc/include/asm/barrier.h
8790+++ b/arch/powerpc/include/asm/barrier.h
8791@@ -73,7 +73,7 @@
8792 do { \
8793 compiletime_assert_atomic_type(*p); \
8794 __lwsync(); \
8795- ACCESS_ONCE(*p) = (v); \
8796+ ACCESS_ONCE_RW(*p) = (v); \
8797 } while (0)
8798
8799 #define smp_load_acquire(p) \
8800diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8801index 34a05a1..a1f2c67 100644
8802--- a/arch/powerpc/include/asm/cache.h
8803+++ b/arch/powerpc/include/asm/cache.h
8804@@ -4,6 +4,7 @@
8805 #ifdef __KERNEL__
8806
8807 #include <asm/reg.h>
8808+#include <linux/const.h>
8809
8810 /* bytes per L1 cache line */
8811 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8812@@ -23,7 +24,7 @@
8813 #define L1_CACHE_SHIFT 7
8814 #endif
8815
8816-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8817+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8818
8819 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8820
8821diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8822index 888d8f3..66f581c 100644
8823--- a/arch/powerpc/include/asm/elf.h
8824+++ b/arch/powerpc/include/asm/elf.h
8825@@ -28,8 +28,19 @@
8826 the loader. We need to make sure that it is out of the way of the program
8827 that it will "exec", and that there is sufficient room for the brk. */
8828
8829-extern unsigned long randomize_et_dyn(unsigned long base);
8830-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8831+#define ELF_ET_DYN_BASE (0x20000000)
8832+
8833+#ifdef CONFIG_PAX_ASLR
8834+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8835+
8836+#ifdef __powerpc64__
8837+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8838+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8839+#else
8840+#define PAX_DELTA_MMAP_LEN 15
8841+#define PAX_DELTA_STACK_LEN 15
8842+#endif
8843+#endif
8844
8845 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8846
8847@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8848 (0x7ff >> (PAGE_SHIFT - 12)) : \
8849 (0x3ffff >> (PAGE_SHIFT - 12)))
8850
8851-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8852-#define arch_randomize_brk arch_randomize_brk
8853-
8854-
8855 #ifdef CONFIG_SPU_BASE
8856 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8857 #define NT_SPU 1
8858diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8859index 8196e9c..d83a9f3 100644
8860--- a/arch/powerpc/include/asm/exec.h
8861+++ b/arch/powerpc/include/asm/exec.h
8862@@ -4,6 +4,6 @@
8863 #ifndef _ASM_POWERPC_EXEC_H
8864 #define _ASM_POWERPC_EXEC_H
8865
8866-extern unsigned long arch_align_stack(unsigned long sp);
8867+#define arch_align_stack(x) ((x) & ~0xfUL)
8868
8869 #endif /* _ASM_POWERPC_EXEC_H */
8870diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8871index 5acabbd..7ea14fa 100644
8872--- a/arch/powerpc/include/asm/kmap_types.h
8873+++ b/arch/powerpc/include/asm/kmap_types.h
8874@@ -10,7 +10,7 @@
8875 * 2 of the License, or (at your option) any later version.
8876 */
8877
8878-#define KM_TYPE_NR 16
8879+#define KM_TYPE_NR 17
8880
8881 #endif /* __KERNEL__ */
8882 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8883diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8884index b8da913..c02b593 100644
8885--- a/arch/powerpc/include/asm/local.h
8886+++ b/arch/powerpc/include/asm/local.h
8887@@ -9,21 +9,65 @@ typedef struct
8888 atomic_long_t a;
8889 } local_t;
8890
8891+typedef struct
8892+{
8893+ atomic_long_unchecked_t a;
8894+} local_unchecked_t;
8895+
8896 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8897
8898 #define local_read(l) atomic_long_read(&(l)->a)
8899+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8900 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8901+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8902
8903 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8904+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8905 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8906+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8907 #define local_inc(l) atomic_long_inc(&(l)->a)
8908+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8909 #define local_dec(l) atomic_long_dec(&(l)->a)
8910+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8911
8912 static __inline__ long local_add_return(long a, local_t *l)
8913 {
8914 long t;
8915
8916 __asm__ __volatile__(
8917+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8918+
8919+#ifdef CONFIG_PAX_REFCOUNT
8920+" mcrxr cr0\n"
8921+" addo. %0,%1,%0\n"
8922+" bf 4*cr0+so, 3f\n"
8923+"2:.long " "0x00c00b00""\n"
8924+#else
8925+" add %0,%1,%0\n"
8926+#endif
8927+
8928+"3:\n"
8929+ PPC405_ERR77(0,%2)
8930+ PPC_STLCX "%0,0,%2 \n\
8931+ bne- 1b"
8932+
8933+#ifdef CONFIG_PAX_REFCOUNT
8934+"\n4:\n"
8935+ _ASM_EXTABLE(2b, 4b)
8936+#endif
8937+
8938+ : "=&r" (t)
8939+ : "r" (a), "r" (&(l->a.counter))
8940+ : "cc", "memory");
8941+
8942+ return t;
8943+}
8944+
8945+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8946+{
8947+ long t;
8948+
8949+ __asm__ __volatile__(
8950 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8951 add %0,%1,%0\n"
8952 PPC405_ERR77(0,%2)
8953@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8954
8955 #define local_cmpxchg(l, o, n) \
8956 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8957+#define local_cmpxchg_unchecked(l, o, n) \
8958+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8959 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8960
8961 /**
8962diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8963index 8565c25..2865190 100644
8964--- a/arch/powerpc/include/asm/mman.h
8965+++ b/arch/powerpc/include/asm/mman.h
8966@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8967 }
8968 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8969
8970-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8971+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8972 {
8973 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8974 }
8975diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8976index 26fe1ae..987ffc5 100644
8977--- a/arch/powerpc/include/asm/page.h
8978+++ b/arch/powerpc/include/asm/page.h
8979@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8980 * and needs to be executable. This means the whole heap ends
8981 * up being executable.
8982 */
8983-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8984- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8985+#define VM_DATA_DEFAULT_FLAGS32 \
8986+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8987+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8988
8989 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8990 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8991@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8992 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8993 #endif
8994
8995+#define ktla_ktva(addr) (addr)
8996+#define ktva_ktla(addr) (addr)
8997+
8998 #ifndef CONFIG_PPC_BOOK3S_64
8999 /*
9000 * Use the top bit of the higher-level page table entries to indicate whether
9001diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
9002index 88693ce..ac6f9ab 100644
9003--- a/arch/powerpc/include/asm/page_64.h
9004+++ b/arch/powerpc/include/asm/page_64.h
9005@@ -153,15 +153,18 @@ do { \
9006 * stack by default, so in the absence of a PT_GNU_STACK program header
9007 * we turn execute permission off.
9008 */
9009-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9010- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9011+#define VM_STACK_DEFAULT_FLAGS32 \
9012+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9013+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9014
9015 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9016 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9017
9018+#ifndef CONFIG_PAX_PAGEEXEC
9019 #define VM_STACK_DEFAULT_FLAGS \
9020 (is_32bit_task() ? \
9021 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
9022+#endif
9023
9024 #include <asm-generic/getorder.h>
9025
9026diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
9027index 4b0be20..c15a27d 100644
9028--- a/arch/powerpc/include/asm/pgalloc-64.h
9029+++ b/arch/powerpc/include/asm/pgalloc-64.h
9030@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9031 #ifndef CONFIG_PPC_64K_PAGES
9032
9033 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
9034+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
9035
9036 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
9037 {
9038@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9039 pud_set(pud, (unsigned long)pmd);
9040 }
9041
9042+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9043+{
9044+ pud_populate(mm, pud, pmd);
9045+}
9046+
9047 #define pmd_populate(mm, pmd, pte_page) \
9048 pmd_populate_kernel(mm, pmd, page_address(pte_page))
9049 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
9050@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
9051 #endif
9052
9053 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
9054+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9055
9056 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
9057 pte_t *pte)
9058diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
9059index d98c1ec..9f61569 100644
9060--- a/arch/powerpc/include/asm/pgtable.h
9061+++ b/arch/powerpc/include/asm/pgtable.h
9062@@ -2,6 +2,7 @@
9063 #define _ASM_POWERPC_PGTABLE_H
9064 #ifdef __KERNEL__
9065
9066+#include <linux/const.h>
9067 #ifndef __ASSEMBLY__
9068 #include <linux/mmdebug.h>
9069 #include <asm/processor.h> /* For TASK_SIZE */
9070diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
9071index 4aad413..85d86bf 100644
9072--- a/arch/powerpc/include/asm/pte-hash32.h
9073+++ b/arch/powerpc/include/asm/pte-hash32.h
9074@@ -21,6 +21,7 @@
9075 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
9076 #define _PAGE_USER 0x004 /* usermode access allowed */
9077 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
9078+#define _PAGE_EXEC _PAGE_GUARDED
9079 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
9080 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
9081 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
9082diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
9083index 0c05059..7e056e4 100644
9084--- a/arch/powerpc/include/asm/reg.h
9085+++ b/arch/powerpc/include/asm/reg.h
9086@@ -251,6 +251,7 @@
9087 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
9088 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
9089 #define DSISR_NOHPTE 0x40000000 /* no translation found */
9090+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
9091 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
9092 #define DSISR_ISSTORE 0x02000000 /* access was a store */
9093 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
9094diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
9095index 5a6614a..d89995d1 100644
9096--- a/arch/powerpc/include/asm/smp.h
9097+++ b/arch/powerpc/include/asm/smp.h
9098@@ -51,7 +51,7 @@ struct smp_ops_t {
9099 int (*cpu_disable)(void);
9100 void (*cpu_die)(unsigned int nr);
9101 int (*cpu_bootable)(unsigned int nr);
9102-};
9103+} __no_const;
9104
9105 extern void smp_send_debugger_break(void);
9106 extern void start_secondary_resume(void);
9107diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
9108index 4dbe072..b803275 100644
9109--- a/arch/powerpc/include/asm/spinlock.h
9110+++ b/arch/powerpc/include/asm/spinlock.h
9111@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
9112 __asm__ __volatile__(
9113 "1: " PPC_LWARX(%0,0,%1,1) "\n"
9114 __DO_SIGN_EXTEND
9115-" addic. %0,%0,1\n\
9116- ble- 2f\n"
9117+
9118+#ifdef CONFIG_PAX_REFCOUNT
9119+" mcrxr cr0\n"
9120+" addico. %0,%0,1\n"
9121+" bf 4*cr0+so, 3f\n"
9122+"2:.long " "0x00c00b00""\n"
9123+#else
9124+" addic. %0,%0,1\n"
9125+#endif
9126+
9127+"3:\n"
9128+ "ble- 4f\n"
9129 PPC405_ERR77(0,%1)
9130 " stwcx. %0,0,%1\n\
9131 bne- 1b\n"
9132 PPC_ACQUIRE_BARRIER
9133-"2:" : "=&r" (tmp)
9134+"4:"
9135+
9136+#ifdef CONFIG_PAX_REFCOUNT
9137+ _ASM_EXTABLE(2b,4b)
9138+#endif
9139+
9140+ : "=&r" (tmp)
9141 : "r" (&rw->lock)
9142 : "cr0", "xer", "memory");
9143
9144@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
9145 __asm__ __volatile__(
9146 "# read_unlock\n\t"
9147 PPC_RELEASE_BARRIER
9148-"1: lwarx %0,0,%1\n\
9149- addic %0,%0,-1\n"
9150+"1: lwarx %0,0,%1\n"
9151+
9152+#ifdef CONFIG_PAX_REFCOUNT
9153+" mcrxr cr0\n"
9154+" addico. %0,%0,-1\n"
9155+" bf 4*cr0+so, 3f\n"
9156+"2:.long " "0x00c00b00""\n"
9157+#else
9158+" addic. %0,%0,-1\n"
9159+#endif
9160+
9161+"3:\n"
9162 PPC405_ERR77(0,%1)
9163 " stwcx. %0,0,%1\n\
9164 bne- 1b"
9165+
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+"\n4:\n"
9168+ _ASM_EXTABLE(2b, 4b)
9169+#endif
9170+
9171 : "=&r"(tmp)
9172 : "r"(&rw->lock)
9173 : "cr0", "xer", "memory");
9174diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
9175index b034ecd..af7e31f 100644
9176--- a/arch/powerpc/include/asm/thread_info.h
9177+++ b/arch/powerpc/include/asm/thread_info.h
9178@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
9179 #if defined(CONFIG_PPC64)
9180 #define TIF_ELF2ABI 18 /* function descriptors must die! */
9181 #endif
9182+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
9183+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9184
9185 /* as above, but as bit values */
9186 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
9187@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
9188 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9189 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
9190 #define _TIF_NOHZ (1<<TIF_NOHZ)
9191+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9192 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
9193 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
9194- _TIF_NOHZ)
9195+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
9196
9197 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
9198 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
9199diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
9200index 9485b43..3bd3c16 100644
9201--- a/arch/powerpc/include/asm/uaccess.h
9202+++ b/arch/powerpc/include/asm/uaccess.h
9203@@ -58,6 +58,7 @@
9204
9205 #endif
9206
9207+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9208 #define access_ok(type, addr, size) \
9209 (__chk_user_ptr(addr), \
9210 __access_ok((__force unsigned long)(addr), (size), get_fs()))
9211@@ -318,52 +319,6 @@ do { \
9212 extern unsigned long __copy_tofrom_user(void __user *to,
9213 const void __user *from, unsigned long size);
9214
9215-#ifndef __powerpc64__
9216-
9217-static inline unsigned long copy_from_user(void *to,
9218- const void __user *from, unsigned long n)
9219-{
9220- unsigned long over;
9221-
9222- if (access_ok(VERIFY_READ, from, n))
9223- return __copy_tofrom_user((__force void __user *)to, from, n);
9224- if ((unsigned long)from < TASK_SIZE) {
9225- over = (unsigned long)from + n - TASK_SIZE;
9226- return __copy_tofrom_user((__force void __user *)to, from,
9227- n - over) + over;
9228- }
9229- return n;
9230-}
9231-
9232-static inline unsigned long copy_to_user(void __user *to,
9233- const void *from, unsigned long n)
9234-{
9235- unsigned long over;
9236-
9237- if (access_ok(VERIFY_WRITE, to, n))
9238- return __copy_tofrom_user(to, (__force void __user *)from, n);
9239- if ((unsigned long)to < TASK_SIZE) {
9240- over = (unsigned long)to + n - TASK_SIZE;
9241- return __copy_tofrom_user(to, (__force void __user *)from,
9242- n - over) + over;
9243- }
9244- return n;
9245-}
9246-
9247-#else /* __powerpc64__ */
9248-
9249-#define __copy_in_user(to, from, size) \
9250- __copy_tofrom_user((to), (from), (size))
9251-
9252-extern unsigned long copy_from_user(void *to, const void __user *from,
9253- unsigned long n);
9254-extern unsigned long copy_to_user(void __user *to, const void *from,
9255- unsigned long n);
9256-extern unsigned long copy_in_user(void __user *to, const void __user *from,
9257- unsigned long n);
9258-
9259-#endif /* __powerpc64__ */
9260-
9261 static inline unsigned long __copy_from_user_inatomic(void *to,
9262 const void __user *from, unsigned long n)
9263 {
9264@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
9265 if (ret == 0)
9266 return 0;
9267 }
9268+
9269+ if (!__builtin_constant_p(n))
9270+ check_object_size(to, n, false);
9271+
9272 return __copy_tofrom_user((__force void __user *)to, from, n);
9273 }
9274
9275@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
9276 if (ret == 0)
9277 return 0;
9278 }
9279+
9280+ if (!__builtin_constant_p(n))
9281+ check_object_size(from, n, true);
9282+
9283 return __copy_tofrom_user(to, (__force const void __user *)from, n);
9284 }
9285
9286@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
9287 return __copy_to_user_inatomic(to, from, size);
9288 }
9289
9290+#ifndef __powerpc64__
9291+
9292+static inline unsigned long __must_check copy_from_user(void *to,
9293+ const void __user *from, unsigned long n)
9294+{
9295+ unsigned long over;
9296+
9297+ if ((long)n < 0)
9298+ return n;
9299+
9300+ if (access_ok(VERIFY_READ, from, n)) {
9301+ if (!__builtin_constant_p(n))
9302+ check_object_size(to, n, false);
9303+ return __copy_tofrom_user((__force void __user *)to, from, n);
9304+ }
9305+ if ((unsigned long)from < TASK_SIZE) {
9306+ over = (unsigned long)from + n - TASK_SIZE;
9307+ if (!__builtin_constant_p(n - over))
9308+ check_object_size(to, n - over, false);
9309+ return __copy_tofrom_user((__force void __user *)to, from,
9310+ n - over) + over;
9311+ }
9312+ return n;
9313+}
9314+
9315+static inline unsigned long __must_check copy_to_user(void __user *to,
9316+ const void *from, unsigned long n)
9317+{
9318+ unsigned long over;
9319+
9320+ if ((long)n < 0)
9321+ return n;
9322+
9323+ if (access_ok(VERIFY_WRITE, to, n)) {
9324+ if (!__builtin_constant_p(n))
9325+ check_object_size(from, n, true);
9326+ return __copy_tofrom_user(to, (__force void __user *)from, n);
9327+ }
9328+ if ((unsigned long)to < TASK_SIZE) {
9329+ over = (unsigned long)to + n - TASK_SIZE;
9330+ if (!__builtin_constant_p(n))
9331+ check_object_size(from, n - over, true);
9332+ return __copy_tofrom_user(to, (__force void __user *)from,
9333+ n - over) + over;
9334+ }
9335+ return n;
9336+}
9337+
9338+#else /* __powerpc64__ */
9339+
9340+#define __copy_in_user(to, from, size) \
9341+ __copy_tofrom_user((to), (from), (size))
9342+
9343+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
9344+{
9345+ if ((long)n < 0 || n > INT_MAX)
9346+ return n;
9347+
9348+ if (!__builtin_constant_p(n))
9349+ check_object_size(to, n, false);
9350+
9351+ if (likely(access_ok(VERIFY_READ, from, n)))
9352+ n = __copy_from_user(to, from, n);
9353+ else
9354+ memset(to, 0, n);
9355+ return n;
9356+}
9357+
9358+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
9359+{
9360+ if ((long)n < 0 || n > INT_MAX)
9361+ return n;
9362+
9363+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
9364+ if (!__builtin_constant_p(n))
9365+ check_object_size(from, n, true);
9366+ n = __copy_to_user(to, from, n);
9367+ }
9368+ return n;
9369+}
9370+
9371+extern unsigned long copy_in_user(void __user *to, const void __user *from,
9372+ unsigned long n);
9373+
9374+#endif /* __powerpc64__ */
9375+
9376 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9377
9378 static inline unsigned long clear_user(void __user *addr, unsigned long size)
9379diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9380index 670c312..60c2b52 100644
9381--- a/arch/powerpc/kernel/Makefile
9382+++ b/arch/powerpc/kernel/Makefile
9383@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9384 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9385 endif
9386
9387+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9388+
9389 obj-y := cputable.o ptrace.o syscalls.o \
9390 irq.o align.o signal_32.o pmc.o vdso.o \
9391 process.o systbl.o idle.o \
9392diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9393index bb9cac6..5181202 100644
9394--- a/arch/powerpc/kernel/exceptions-64e.S
9395+++ b/arch/powerpc/kernel/exceptions-64e.S
9396@@ -1010,6 +1010,7 @@ storage_fault_common:
9397 std r14,_DAR(r1)
9398 std r15,_DSISR(r1)
9399 addi r3,r1,STACK_FRAME_OVERHEAD
9400+ bl save_nvgprs
9401 mr r4,r14
9402 mr r5,r15
9403 ld r14,PACA_EXGEN+EX_R14(r13)
9404@@ -1018,8 +1019,7 @@ storage_fault_common:
9405 cmpdi r3,0
9406 bne- 1f
9407 b ret_from_except_lite
9408-1: bl save_nvgprs
9409- mr r5,r3
9410+1: mr r5,r3
9411 addi r3,r1,STACK_FRAME_OVERHEAD
9412 ld r4,_DAR(r1)
9413 bl bad_page_fault
9414diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9415index 050f79a..f385bfe 100644
9416--- a/arch/powerpc/kernel/exceptions-64s.S
9417+++ b/arch/powerpc/kernel/exceptions-64s.S
9418@@ -1593,10 +1593,10 @@ handle_page_fault:
9419 11: ld r4,_DAR(r1)
9420 ld r5,_DSISR(r1)
9421 addi r3,r1,STACK_FRAME_OVERHEAD
9422+ bl save_nvgprs
9423 bl do_page_fault
9424 cmpdi r3,0
9425 beq+ 12f
9426- bl save_nvgprs
9427 mr r5,r3
9428 addi r3,r1,STACK_FRAME_OVERHEAD
9429 lwz r4,_DAR(r1)
9430diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9431index 4c5891d..a5d88bb 100644
9432--- a/arch/powerpc/kernel/irq.c
9433+++ b/arch/powerpc/kernel/irq.c
9434@@ -461,6 +461,8 @@ void migrate_irqs(void)
9435 }
9436 #endif
9437
9438+extern void gr_handle_kernel_exploit(void);
9439+
9440 static inline void check_stack_overflow(void)
9441 {
9442 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9443@@ -473,6 +475,7 @@ static inline void check_stack_overflow(void)
9444 printk("do_IRQ: stack overflow: %ld\n",
9445 sp - sizeof(struct thread_info));
9446 dump_stack();
9447+ gr_handle_kernel_exploit();
9448 }
9449 #endif
9450 }
9451diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9452index 6cff040..74ac5d1b 100644
9453--- a/arch/powerpc/kernel/module_32.c
9454+++ b/arch/powerpc/kernel/module_32.c
9455@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9456 me->arch.core_plt_section = i;
9457 }
9458 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9459- printk("Module doesn't contain .plt or .init.plt sections.\n");
9460+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9461 return -ENOEXEC;
9462 }
9463
9464@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9465
9466 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9467 /* Init, or core PLT? */
9468- if (location >= mod->module_core
9469- && location < mod->module_core + mod->core_size)
9470+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9471+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9472 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9473- else
9474+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9475+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9476 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9477+ else {
9478+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9479+ return ~0UL;
9480+ }
9481
9482 /* Find this entry, or if that fails, the next avail. entry */
9483 while (entry->jump[0]) {
9484@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9485 }
9486 #ifdef CONFIG_DYNAMIC_FTRACE
9487 module->arch.tramp =
9488- do_plt_call(module->module_core,
9489+ do_plt_call(module->module_core_rx,
9490 (unsigned long)ftrace_caller,
9491 sechdrs, module);
9492 #endif
9493diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9494index bf44ae9..6d2ce71 100644
9495--- a/arch/powerpc/kernel/process.c
9496+++ b/arch/powerpc/kernel/process.c
9497@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9498 * Lookup NIP late so we have the best change of getting the
9499 * above info out without failing
9500 */
9501- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9502- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9503+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9504+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9505 #endif
9506 show_stack(current, (unsigned long *) regs->gpr[1]);
9507 if (!user_mode(regs))
9508@@ -1558,10 +1558,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9509 newsp = stack[0];
9510 ip = stack[STACK_FRAME_LR_SAVE];
9511 if (!firstframe || ip != lr) {
9512- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9513+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9514 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9515 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9516- printk(" (%pS)",
9517+ printk(" (%pA)",
9518 (void *)current->ret_stack[curr_frame].ret);
9519 curr_frame--;
9520 }
9521@@ -1581,7 +1581,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9522 struct pt_regs *regs = (struct pt_regs *)
9523 (sp + STACK_FRAME_OVERHEAD);
9524 lr = regs->link;
9525- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9526+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9527 regs->trap, (void *)regs->nip, (void *)lr);
9528 firstframe = 1;
9529 }
9530@@ -1617,58 +1617,3 @@ void notrace __ppc64_runlatch_off(void)
9531 mtspr(SPRN_CTRLT, ctrl);
9532 }
9533 #endif /* CONFIG_PPC64 */
9534-
9535-unsigned long arch_align_stack(unsigned long sp)
9536-{
9537- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9538- sp -= get_random_int() & ~PAGE_MASK;
9539- return sp & ~0xf;
9540-}
9541-
9542-static inline unsigned long brk_rnd(void)
9543-{
9544- unsigned long rnd = 0;
9545-
9546- /* 8MB for 32bit, 1GB for 64bit */
9547- if (is_32bit_task())
9548- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9549- else
9550- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9551-
9552- return rnd << PAGE_SHIFT;
9553-}
9554-
9555-unsigned long arch_randomize_brk(struct mm_struct *mm)
9556-{
9557- unsigned long base = mm->brk;
9558- unsigned long ret;
9559-
9560-#ifdef CONFIG_PPC_STD_MMU_64
9561- /*
9562- * If we are using 1TB segments and we are allowed to randomise
9563- * the heap, we can put it above 1TB so it is backed by a 1TB
9564- * segment. Otherwise the heap will be in the bottom 1TB
9565- * which always uses 256MB segments and this may result in a
9566- * performance penalty.
9567- */
9568- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9569- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9570-#endif
9571-
9572- ret = PAGE_ALIGN(base + brk_rnd());
9573-
9574- if (ret < mm->brk)
9575- return mm->brk;
9576-
9577- return ret;
9578-}
9579-
9580-unsigned long randomize_et_dyn(unsigned long base)
9581-{
9582- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9583-
9584- if (ret < base)
9585- return base;
9586-
9587- return ret;
9588-}
9589diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9590index 2e3d2bf..35df241 100644
9591--- a/arch/powerpc/kernel/ptrace.c
9592+++ b/arch/powerpc/kernel/ptrace.c
9593@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9594 return ret;
9595 }
9596
9597+#ifdef CONFIG_GRKERNSEC_SETXID
9598+extern void gr_delayed_cred_worker(void);
9599+#endif
9600+
9601 /*
9602 * We must return the syscall number to actually look up in the table.
9603 * This can be -1L to skip running any syscall at all.
9604@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9605
9606 secure_computing_strict(regs->gpr[0]);
9607
9608+#ifdef CONFIG_GRKERNSEC_SETXID
9609+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9610+ gr_delayed_cred_worker();
9611+#endif
9612+
9613 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9614 tracehook_report_syscall_entry(regs))
9615 /*
9616@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9617 {
9618 int step;
9619
9620+#ifdef CONFIG_GRKERNSEC_SETXID
9621+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9622+ gr_delayed_cred_worker();
9623+#endif
9624+
9625 audit_syscall_exit(regs);
9626
9627 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9628diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9629index b171001..4ac7ac5 100644
9630--- a/arch/powerpc/kernel/signal_32.c
9631+++ b/arch/powerpc/kernel/signal_32.c
9632@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9633 /* Save user registers on the stack */
9634 frame = &rt_sf->uc.uc_mcontext;
9635 addr = frame;
9636- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9637+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9638 sigret = 0;
9639 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9640 } else {
9641diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9642index 2cb0c94..c0c0bc9 100644
9643--- a/arch/powerpc/kernel/signal_64.c
9644+++ b/arch/powerpc/kernel/signal_64.c
9645@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9646 current->thread.fp_state.fpscr = 0;
9647
9648 /* Set up to return from userspace. */
9649- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9650+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9651 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9652 } else {
9653 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9654diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9655index 0dc43f9..a885d33 100644
9656--- a/arch/powerpc/kernel/traps.c
9657+++ b/arch/powerpc/kernel/traps.c
9658@@ -36,6 +36,7 @@
9659 #include <linux/debugfs.h>
9660 #include <linux/ratelimit.h>
9661 #include <linux/context_tracking.h>
9662+#include <linux/uaccess.h>
9663
9664 #include <asm/emulated_ops.h>
9665 #include <asm/pgtable.h>
9666@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9667 return flags;
9668 }
9669
9670+extern void gr_handle_kernel_exploit(void);
9671+
9672 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9673 int signr)
9674 {
9675@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9676 panic("Fatal exception in interrupt");
9677 if (panic_on_oops)
9678 panic("Fatal exception");
9679+
9680+ gr_handle_kernel_exploit();
9681+
9682 do_exit(signr);
9683 }
9684
9685@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9686 enum ctx_state prev_state = exception_enter();
9687 unsigned int reason = get_reason(regs);
9688
9689+#ifdef CONFIG_PAX_REFCOUNT
9690+ unsigned int bkpt;
9691+ const struct exception_table_entry *entry;
9692+
9693+ if (reason & REASON_ILLEGAL) {
9694+ /* Check if PaX bad instruction */
9695+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9696+ current->thread.trap_nr = 0;
9697+ pax_report_refcount_overflow(regs);
9698+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9699+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9700+ regs->nip = entry->fixup;
9701+ return;
9702+ }
9703+ /* fixup_exception() could not handle */
9704+ goto bail;
9705+ }
9706+ }
9707+#endif
9708+
9709 /* We can now get here via a FP Unavailable exception if the core
9710 * has no FPU, in that case the reason flags will be 0 */
9711
9712diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9713index f174351..5722009 100644
9714--- a/arch/powerpc/kernel/vdso.c
9715+++ b/arch/powerpc/kernel/vdso.c
9716@@ -35,6 +35,7 @@
9717 #include <asm/vdso.h>
9718 #include <asm/vdso_datapage.h>
9719 #include <asm/setup.h>
9720+#include <asm/mman.h>
9721
9722 #undef DEBUG
9723
9724@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9725 vdso_base = VDSO32_MBASE;
9726 #endif
9727
9728- current->mm->context.vdso_base = 0;
9729+ current->mm->context.vdso_base = ~0UL;
9730
9731 /* vDSO has a problem and was disabled, just don't "enable" it for the
9732 * process
9733@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9734 vdso_base = get_unmapped_area(NULL, vdso_base,
9735 (vdso_pages << PAGE_SHIFT) +
9736 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9737- 0, 0);
9738+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9739 if (IS_ERR_VALUE(vdso_base)) {
9740 rc = vdso_base;
9741 goto fail_mmapsem;
9742diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9743index 4c79284..0e462c3 100644
9744--- a/arch/powerpc/kvm/powerpc.c
9745+++ b/arch/powerpc/kvm/powerpc.c
9746@@ -1338,7 +1338,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9747 }
9748 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9749
9750-int kvm_arch_init(void *opaque)
9751+int kvm_arch_init(const void *opaque)
9752 {
9753 return 0;
9754 }
9755diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9756index 5eea6f3..5d10396 100644
9757--- a/arch/powerpc/lib/usercopy_64.c
9758+++ b/arch/powerpc/lib/usercopy_64.c
9759@@ -9,22 +9,6 @@
9760 #include <linux/module.h>
9761 #include <asm/uaccess.h>
9762
9763-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9764-{
9765- if (likely(access_ok(VERIFY_READ, from, n)))
9766- n = __copy_from_user(to, from, n);
9767- else
9768- memset(to, 0, n);
9769- return n;
9770-}
9771-
9772-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9773-{
9774- if (likely(access_ok(VERIFY_WRITE, to, n)))
9775- n = __copy_to_user(to, from, n);
9776- return n;
9777-}
9778-
9779 unsigned long copy_in_user(void __user *to, const void __user *from,
9780 unsigned long n)
9781 {
9782@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9783 return n;
9784 }
9785
9786-EXPORT_SYMBOL(copy_from_user);
9787-EXPORT_SYMBOL(copy_to_user);
9788 EXPORT_SYMBOL(copy_in_user);
9789
9790diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9791index 51ab9e7..7d3c78b 100644
9792--- a/arch/powerpc/mm/fault.c
9793+++ b/arch/powerpc/mm/fault.c
9794@@ -33,6 +33,10 @@
9795 #include <linux/magic.h>
9796 #include <linux/ratelimit.h>
9797 #include <linux/context_tracking.h>
9798+#include <linux/slab.h>
9799+#include <linux/pagemap.h>
9800+#include <linux/compiler.h>
9801+#include <linux/unistd.h>
9802
9803 #include <asm/firmware.h>
9804 #include <asm/page.h>
9805@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9806 }
9807 #endif
9808
9809+#ifdef CONFIG_PAX_PAGEEXEC
9810+/*
9811+ * PaX: decide what to do with offenders (regs->nip = fault address)
9812+ *
9813+ * returns 1 when task should be killed
9814+ */
9815+static int pax_handle_fetch_fault(struct pt_regs *regs)
9816+{
9817+ return 1;
9818+}
9819+
9820+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9821+{
9822+ unsigned long i;
9823+
9824+ printk(KERN_ERR "PAX: bytes at PC: ");
9825+ for (i = 0; i < 5; i++) {
9826+ unsigned int c;
9827+ if (get_user(c, (unsigned int __user *)pc+i))
9828+ printk(KERN_CONT "???????? ");
9829+ else
9830+ printk(KERN_CONT "%08x ", c);
9831+ }
9832+ printk("\n");
9833+}
9834+#endif
9835+
9836 /*
9837 * Check whether the instruction at regs->nip is a store using
9838 * an update addressing form which will update r1.
9839@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9840 * indicate errors in DSISR but can validly be set in SRR1.
9841 */
9842 if (trap == 0x400)
9843- error_code &= 0x48200000;
9844+ error_code &= 0x58200000;
9845 else
9846 is_write = error_code & DSISR_ISSTORE;
9847 #else
9848@@ -378,7 +409,7 @@ good_area:
9849 * "undefined". Of those that can be set, this is the only
9850 * one which seems bad.
9851 */
9852- if (error_code & 0x10000000)
9853+ if (error_code & DSISR_GUARDED)
9854 /* Guarded storage error. */
9855 goto bad_area;
9856 #endif /* CONFIG_8xx */
9857@@ -393,7 +424,7 @@ good_area:
9858 * processors use the same I/D cache coherency mechanism
9859 * as embedded.
9860 */
9861- if (error_code & DSISR_PROTFAULT)
9862+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9863 goto bad_area;
9864 #endif /* CONFIG_PPC_STD_MMU */
9865
9866@@ -483,6 +514,23 @@ bad_area:
9867 bad_area_nosemaphore:
9868 /* User mode accesses cause a SIGSEGV */
9869 if (user_mode(regs)) {
9870+
9871+#ifdef CONFIG_PAX_PAGEEXEC
9872+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9873+#ifdef CONFIG_PPC_STD_MMU
9874+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9875+#else
9876+ if (is_exec && regs->nip == address) {
9877+#endif
9878+ switch (pax_handle_fetch_fault(regs)) {
9879+ }
9880+
9881+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9882+ do_group_exit(SIGKILL);
9883+ }
9884+ }
9885+#endif
9886+
9887 _exception(SIGSEGV, regs, code, address);
9888 goto bail;
9889 }
9890diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9891index cb8bdbe..cde4bc7 100644
9892--- a/arch/powerpc/mm/mmap.c
9893+++ b/arch/powerpc/mm/mmap.c
9894@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9895 return sysctl_legacy_va_layout;
9896 }
9897
9898-static unsigned long mmap_rnd(void)
9899+static unsigned long mmap_rnd(struct mm_struct *mm)
9900 {
9901 unsigned long rnd = 0;
9902
9903+#ifdef CONFIG_PAX_RANDMMAP
9904+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9905+#endif
9906+
9907 if (current->flags & PF_RANDOMIZE) {
9908 /* 8MB for 32bit, 1GB for 64bit */
9909 if (is_32bit_task())
9910@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9911 return rnd << PAGE_SHIFT;
9912 }
9913
9914-static inline unsigned long mmap_base(void)
9915+static inline unsigned long mmap_base(struct mm_struct *mm)
9916 {
9917 unsigned long gap = rlimit(RLIMIT_STACK);
9918
9919@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9920 else if (gap > MAX_GAP)
9921 gap = MAX_GAP;
9922
9923- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9924+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9925 }
9926
9927 /*
9928@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9929 */
9930 if (mmap_is_legacy()) {
9931 mm->mmap_base = TASK_UNMAPPED_BASE;
9932+
9933+#ifdef CONFIG_PAX_RANDMMAP
9934+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9935+ mm->mmap_base += mm->delta_mmap;
9936+#endif
9937+
9938 mm->get_unmapped_area = arch_get_unmapped_area;
9939 } else {
9940- mm->mmap_base = mmap_base();
9941+ mm->mmap_base = mmap_base(mm);
9942+
9943+#ifdef CONFIG_PAX_RANDMMAP
9944+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9945+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9946+#endif
9947+
9948 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9949 }
9950 }
9951diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9952index b0c75cc..ef7fb93 100644
9953--- a/arch/powerpc/mm/slice.c
9954+++ b/arch/powerpc/mm/slice.c
9955@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9956 if ((mm->task_size - len) < addr)
9957 return 0;
9958 vma = find_vma(mm, addr);
9959- return (!vma || (addr + len) <= vma->vm_start);
9960+ return check_heap_stack_gap(vma, addr, len, 0);
9961 }
9962
9963 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9964@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9965 info.align_offset = 0;
9966
9967 addr = TASK_UNMAPPED_BASE;
9968+
9969+#ifdef CONFIG_PAX_RANDMMAP
9970+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9971+ addr += mm->delta_mmap;
9972+#endif
9973+
9974 while (addr < TASK_SIZE) {
9975 info.low_limit = addr;
9976 if (!slice_scan_available(addr, available, 1, &addr))
9977@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9978 if (fixed && addr > (mm->task_size - len))
9979 return -ENOMEM;
9980
9981+#ifdef CONFIG_PAX_RANDMMAP
9982+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9983+ addr = 0;
9984+#endif
9985+
9986 /* If hint, make sure it matches our alignment restrictions */
9987 if (!fixed && addr) {
9988 addr = _ALIGN_UP(addr, 1ul << pshift);
9989diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
9990index 3afa6f4..40c53ff 100644
9991--- a/arch/powerpc/net/bpf_jit_comp.c
9992+++ b/arch/powerpc/net/bpf_jit_comp.c
9993@@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp)
9994 {
9995 if (fp->jited)
9996 module_free(NULL, fp->bpf_func);
9997- kfree(fp);
9998+
9999+ bpf_prog_unlock_free(fp);
10000 }
10001diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10002index 4278acf..67fd0e6 100644
10003--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10004+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10005@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
10006 }
10007
10008 static struct pci_ops scc_pciex_pci_ops = {
10009- scc_pciex_read_config,
10010- scc_pciex_write_config,
10011+ .read = scc_pciex_read_config,
10012+ .write = scc_pciex_write_config,
10013 };
10014
10015 static void pciex_clear_intr_all(unsigned int __iomem *base)
10016diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
10017index d966bbe..372124a 100644
10018--- a/arch/powerpc/platforms/cell/spufs/file.c
10019+++ b/arch/powerpc/platforms/cell/spufs/file.c
10020@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10021 return VM_FAULT_NOPAGE;
10022 }
10023
10024-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
10025+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
10026 unsigned long address,
10027- void *buf, int len, int write)
10028+ void *buf, size_t len, int write)
10029 {
10030 struct spu_context *ctx = vma->vm_file->private_data;
10031 unsigned long offset = address - vma->vm_start;
10032diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
10033index fa934fe..c296056 100644
10034--- a/arch/s390/include/asm/atomic.h
10035+++ b/arch/s390/include/asm/atomic.h
10036@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
10037 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
10038 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10039
10040+#define atomic64_read_unchecked(v) atomic64_read(v)
10041+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10042+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10043+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10044+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10045+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10046+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10047+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10048+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10049+
10050 #endif /* __ARCH_S390_ATOMIC__ */
10051diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
10052index 19ff956..8d39cb1 100644
10053--- a/arch/s390/include/asm/barrier.h
10054+++ b/arch/s390/include/asm/barrier.h
10055@@ -37,7 +37,7 @@
10056 do { \
10057 compiletime_assert_atomic_type(*p); \
10058 barrier(); \
10059- ACCESS_ONCE(*p) = (v); \
10060+ ACCESS_ONCE_RW(*p) = (v); \
10061 } while (0)
10062
10063 #define smp_load_acquire(p) \
10064diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
10065index 4d7ccac..d03d0ad 100644
10066--- a/arch/s390/include/asm/cache.h
10067+++ b/arch/s390/include/asm/cache.h
10068@@ -9,8 +9,10 @@
10069 #ifndef __ARCH_S390_CACHE_H
10070 #define __ARCH_S390_CACHE_H
10071
10072-#define L1_CACHE_BYTES 256
10073+#include <linux/const.h>
10074+
10075 #define L1_CACHE_SHIFT 8
10076+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10077 #define NET_SKB_PAD 32
10078
10079 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10080diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
10081index 78f4f87..598ce39 100644
10082--- a/arch/s390/include/asm/elf.h
10083+++ b/arch/s390/include/asm/elf.h
10084@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
10085 the loader. We need to make sure that it is out of the way of the program
10086 that it will "exec", and that there is sufficient room for the brk. */
10087
10088-extern unsigned long randomize_et_dyn(unsigned long base);
10089-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
10090+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
10091+
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10097+#endif
10098
10099 /* This yields a mask that user programs can use to figure out what
10100 instruction set this CPU supports. */
10101@@ -222,9 +228,6 @@ struct linux_binprm;
10102 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
10103 int arch_setup_additional_pages(struct linux_binprm *, int);
10104
10105-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10106-#define arch_randomize_brk arch_randomize_brk
10107-
10108 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
10109
10110 #endif
10111diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
10112index c4a93d6..4d2a9b4 100644
10113--- a/arch/s390/include/asm/exec.h
10114+++ b/arch/s390/include/asm/exec.h
10115@@ -7,6 +7,6 @@
10116 #ifndef __ASM_EXEC_H
10117 #define __ASM_EXEC_H
10118
10119-extern unsigned long arch_align_stack(unsigned long sp);
10120+#define arch_align_stack(x) ((x) & ~0xfUL)
10121
10122 #endif /* __ASM_EXEC_H */
10123diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
10124index cd4c68e..6764641 100644
10125--- a/arch/s390/include/asm/uaccess.h
10126+++ b/arch/s390/include/asm/uaccess.h
10127@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
10128 __range_ok((unsigned long)(addr), (size)); \
10129 })
10130
10131+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10132 #define access_ok(type, addr, size) __access_ok(addr, size)
10133
10134 /*
10135@@ -275,6 +276,10 @@ static inline unsigned long __must_check
10136 copy_to_user(void __user *to, const void *from, unsigned long n)
10137 {
10138 might_fault();
10139+
10140+ if ((long)n < 0)
10141+ return n;
10142+
10143 return __copy_to_user(to, from, n);
10144 }
10145
10146@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
10147 static inline unsigned long __must_check
10148 copy_from_user(void *to, const void __user *from, unsigned long n)
10149 {
10150- unsigned int sz = __compiletime_object_size(to);
10151+ size_t sz = __compiletime_object_size(to);
10152
10153 might_fault();
10154- if (unlikely(sz != -1 && sz < n)) {
10155+
10156+ if ((long)n < 0)
10157+ return n;
10158+
10159+ if (unlikely(sz != (size_t)-1 && sz < n)) {
10160 copy_from_user_overflow();
10161 return n;
10162 }
10163diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
10164index b89b591..fd9609d 100644
10165--- a/arch/s390/kernel/module.c
10166+++ b/arch/s390/kernel/module.c
10167@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
10168
10169 /* Increase core size by size of got & plt and set start
10170 offsets for got and plt. */
10171- me->core_size = ALIGN(me->core_size, 4);
10172- me->arch.got_offset = me->core_size;
10173- me->core_size += me->arch.got_size;
10174- me->arch.plt_offset = me->core_size;
10175- me->core_size += me->arch.plt_size;
10176+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
10177+ me->arch.got_offset = me->core_size_rw;
10178+ me->core_size_rw += me->arch.got_size;
10179+ me->arch.plt_offset = me->core_size_rx;
10180+ me->core_size_rx += me->arch.plt_size;
10181 return 0;
10182 }
10183
10184@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10185 if (info->got_initialized == 0) {
10186 Elf_Addr *gotent;
10187
10188- gotent = me->module_core + me->arch.got_offset +
10189+ gotent = me->module_core_rw + me->arch.got_offset +
10190 info->got_offset;
10191 *gotent = val;
10192 info->got_initialized = 1;
10193@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10194 rc = apply_rela_bits(loc, val, 0, 64, 0);
10195 else if (r_type == R_390_GOTENT ||
10196 r_type == R_390_GOTPLTENT) {
10197- val += (Elf_Addr) me->module_core - loc;
10198+ val += (Elf_Addr) me->module_core_rw - loc;
10199 rc = apply_rela_bits(loc, val, 1, 32, 1);
10200 }
10201 break;
10202@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10203 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
10204 if (info->plt_initialized == 0) {
10205 unsigned int *ip;
10206- ip = me->module_core + me->arch.plt_offset +
10207+ ip = me->module_core_rx + me->arch.plt_offset +
10208 info->plt_offset;
10209 #ifndef CONFIG_64BIT
10210 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
10211@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10212 val - loc + 0xffffUL < 0x1ffffeUL) ||
10213 (r_type == R_390_PLT32DBL &&
10214 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
10215- val = (Elf_Addr) me->module_core +
10216+ val = (Elf_Addr) me->module_core_rx +
10217 me->arch.plt_offset +
10218 info->plt_offset;
10219 val += rela->r_addend - loc;
10220@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10221 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
10222 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
10223 val = val + rela->r_addend -
10224- ((Elf_Addr) me->module_core + me->arch.got_offset);
10225+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
10226 if (r_type == R_390_GOTOFF16)
10227 rc = apply_rela_bits(loc, val, 0, 16, 0);
10228 else if (r_type == R_390_GOTOFF32)
10229@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10230 break;
10231 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
10232 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
10233- val = (Elf_Addr) me->module_core + me->arch.got_offset +
10234+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
10235 rela->r_addend - loc;
10236 if (r_type == R_390_GOTPC)
10237 rc = apply_rela_bits(loc, val, 1, 32, 0);
10238diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
10239index 93b9ca4..4ea1454 100644
10240--- a/arch/s390/kernel/process.c
10241+++ b/arch/s390/kernel/process.c
10242@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
10243 }
10244 return 0;
10245 }
10246-
10247-unsigned long arch_align_stack(unsigned long sp)
10248-{
10249- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10250- sp -= get_random_int() & ~PAGE_MASK;
10251- return sp & ~0xf;
10252-}
10253-
10254-static inline unsigned long brk_rnd(void)
10255-{
10256- /* 8MB for 32bit, 1GB for 64bit */
10257- if (is_32bit_task())
10258- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
10259- else
10260- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
10261-}
10262-
10263-unsigned long arch_randomize_brk(struct mm_struct *mm)
10264-{
10265- unsigned long ret;
10266-
10267- ret = PAGE_ALIGN(mm->brk + brk_rnd());
10268- return (ret > mm->brk) ? ret : mm->brk;
10269-}
10270-
10271-unsigned long randomize_et_dyn(unsigned long base)
10272-{
10273- unsigned long ret;
10274-
10275- if (!(current->flags & PF_RANDOMIZE))
10276- return base;
10277- ret = PAGE_ALIGN(base + brk_rnd());
10278- return (ret > base) ? ret : base;
10279-}
10280diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
10281index 9b436c2..54fbf0a 100644
10282--- a/arch/s390/mm/mmap.c
10283+++ b/arch/s390/mm/mmap.c
10284@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10285 */
10286 if (mmap_is_legacy()) {
10287 mm->mmap_base = mmap_base_legacy();
10288+
10289+#ifdef CONFIG_PAX_RANDMMAP
10290+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10291+ mm->mmap_base += mm->delta_mmap;
10292+#endif
10293+
10294 mm->get_unmapped_area = arch_get_unmapped_area;
10295 } else {
10296 mm->mmap_base = mmap_base();
10297+
10298+#ifdef CONFIG_PAX_RANDMMAP
10299+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10300+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10301+#endif
10302+
10303 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10304 }
10305 }
10306@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10307 */
10308 if (mmap_is_legacy()) {
10309 mm->mmap_base = mmap_base_legacy();
10310+
10311+#ifdef CONFIG_PAX_RANDMMAP
10312+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10313+ mm->mmap_base += mm->delta_mmap;
10314+#endif
10315+
10316 mm->get_unmapped_area = s390_get_unmapped_area;
10317 } else {
10318 mm->mmap_base = mmap_base();
10319+
10320+#ifdef CONFIG_PAX_RANDMMAP
10321+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10322+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10323+#endif
10324+
10325 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
10326 }
10327 }
10328diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
10329index 61e45b7..f2833c5 100644
10330--- a/arch/s390/net/bpf_jit_comp.c
10331+++ b/arch/s390/net/bpf_jit_comp.c
10332@@ -887,5 +887,5 @@ void bpf_jit_free(struct bpf_prog *fp)
10333 module_free(NULL, header);
10334
10335 free_filter:
10336- kfree(fp);
10337+ bpf_prog_unlock_free(fp);
10338 }
10339diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
10340index ae3d59f..f65f075 100644
10341--- a/arch/score/include/asm/cache.h
10342+++ b/arch/score/include/asm/cache.h
10343@@ -1,7 +1,9 @@
10344 #ifndef _ASM_SCORE_CACHE_H
10345 #define _ASM_SCORE_CACHE_H
10346
10347+#include <linux/const.h>
10348+
10349 #define L1_CACHE_SHIFT 4
10350-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10351+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10352
10353 #endif /* _ASM_SCORE_CACHE_H */
10354diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
10355index f9f3cd5..58ff438 100644
10356--- a/arch/score/include/asm/exec.h
10357+++ b/arch/score/include/asm/exec.h
10358@@ -1,6 +1,6 @@
10359 #ifndef _ASM_SCORE_EXEC_H
10360 #define _ASM_SCORE_EXEC_H
10361
10362-extern unsigned long arch_align_stack(unsigned long sp);
10363+#define arch_align_stack(x) (x)
10364
10365 #endif /* _ASM_SCORE_EXEC_H */
10366diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
10367index a1519ad3..e8ac1ff 100644
10368--- a/arch/score/kernel/process.c
10369+++ b/arch/score/kernel/process.c
10370@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
10371
10372 return task_pt_regs(task)->cp0_epc;
10373 }
10374-
10375-unsigned long arch_align_stack(unsigned long sp)
10376-{
10377- return sp;
10378-}
10379diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
10380index ef9e555..331bd29 100644
10381--- a/arch/sh/include/asm/cache.h
10382+++ b/arch/sh/include/asm/cache.h
10383@@ -9,10 +9,11 @@
10384 #define __ASM_SH_CACHE_H
10385 #ifdef __KERNEL__
10386
10387+#include <linux/const.h>
10388 #include <linux/init.h>
10389 #include <cpu/cache.h>
10390
10391-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10392+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10393
10394 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10395
10396diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
10397index 6777177..cb5e44f 100644
10398--- a/arch/sh/mm/mmap.c
10399+++ b/arch/sh/mm/mmap.c
10400@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10401 struct mm_struct *mm = current->mm;
10402 struct vm_area_struct *vma;
10403 int do_colour_align;
10404+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10405 struct vm_unmapped_area_info info;
10406
10407 if (flags & MAP_FIXED) {
10408@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10409 if (filp || (flags & MAP_SHARED))
10410 do_colour_align = 1;
10411
10412+#ifdef CONFIG_PAX_RANDMMAP
10413+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10414+#endif
10415+
10416 if (addr) {
10417 if (do_colour_align)
10418 addr = COLOUR_ALIGN(addr, pgoff);
10419@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10420 addr = PAGE_ALIGN(addr);
10421
10422 vma = find_vma(mm, addr);
10423- if (TASK_SIZE - len >= addr &&
10424- (!vma || addr + len <= vma->vm_start))
10425+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10426 return addr;
10427 }
10428
10429 info.flags = 0;
10430 info.length = len;
10431- info.low_limit = TASK_UNMAPPED_BASE;
10432+ info.low_limit = mm->mmap_base;
10433 info.high_limit = TASK_SIZE;
10434 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10435 info.align_offset = pgoff << PAGE_SHIFT;
10436@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10437 struct mm_struct *mm = current->mm;
10438 unsigned long addr = addr0;
10439 int do_colour_align;
10440+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10441 struct vm_unmapped_area_info info;
10442
10443 if (flags & MAP_FIXED) {
10444@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10445 if (filp || (flags & MAP_SHARED))
10446 do_colour_align = 1;
10447
10448+#ifdef CONFIG_PAX_RANDMMAP
10449+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10450+#endif
10451+
10452 /* requesting a specific address */
10453 if (addr) {
10454 if (do_colour_align)
10455@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10456 addr = PAGE_ALIGN(addr);
10457
10458 vma = find_vma(mm, addr);
10459- if (TASK_SIZE - len >= addr &&
10460- (!vma || addr + len <= vma->vm_start))
10461+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10462 return addr;
10463 }
10464
10465@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10466 VM_BUG_ON(addr != -ENOMEM);
10467 info.flags = 0;
10468 info.low_limit = TASK_UNMAPPED_BASE;
10469+
10470+#ifdef CONFIG_PAX_RANDMMAP
10471+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10472+ info.low_limit += mm->delta_mmap;
10473+#endif
10474+
10475 info.high_limit = TASK_SIZE;
10476 addr = vm_unmapped_area(&info);
10477 }
10478diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10479index bb894c8..8141d5c 100644
10480--- a/arch/sparc/include/asm/atomic_64.h
10481+++ b/arch/sparc/include/asm/atomic_64.h
10482@@ -15,18 +15,40 @@
10483 #define ATOMIC64_INIT(i) { (i) }
10484
10485 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10486+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10487+{
10488+ return v->counter;
10489+}
10490 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10491+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10492+{
10493+ return v->counter;
10494+}
10495
10496 #define atomic_set(v, i) (((v)->counter) = i)
10497+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10498+{
10499+ v->counter = i;
10500+}
10501 #define atomic64_set(v, i) (((v)->counter) = i)
10502+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10503+{
10504+ v->counter = i;
10505+}
10506
10507 void atomic_add(int, atomic_t *);
10508+void atomic_add_unchecked(int, atomic_unchecked_t *);
10509 void atomic64_add(long, atomic64_t *);
10510+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10511 void atomic_sub(int, atomic_t *);
10512+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10513 void atomic64_sub(long, atomic64_t *);
10514+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10515
10516 int atomic_add_ret(int, atomic_t *);
10517+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10518 long atomic64_add_ret(long, atomic64_t *);
10519+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10520 int atomic_sub_ret(int, atomic_t *);
10521 long atomic64_sub_ret(long, atomic64_t *);
10522
10523@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10524 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10525
10526 #define atomic_inc_return(v) atomic_add_ret(1, v)
10527+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10528+{
10529+ return atomic_add_ret_unchecked(1, v);
10530+}
10531 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10532+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10533+{
10534+ return atomic64_add_ret_unchecked(1, v);
10535+}
10536
10537 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10538 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10539
10540 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10541+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10542+{
10543+ return atomic_add_ret_unchecked(i, v);
10544+}
10545 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10546+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10547+{
10548+ return atomic64_add_ret_unchecked(i, v);
10549+}
10550
10551 /*
10552 * atomic_inc_and_test - increment and test
10553@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10554 * other cases.
10555 */
10556 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10557+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10558+{
10559+ return atomic_inc_return_unchecked(v) == 0;
10560+}
10561 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10562
10563 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10564@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10565 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10566
10567 #define atomic_inc(v) atomic_add(1, v)
10568+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10569+{
10570+ atomic_add_unchecked(1, v);
10571+}
10572 #define atomic64_inc(v) atomic64_add(1, v)
10573+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10574+{
10575+ atomic64_add_unchecked(1, v);
10576+}
10577
10578 #define atomic_dec(v) atomic_sub(1, v)
10579+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10580+{
10581+ atomic_sub_unchecked(1, v);
10582+}
10583 #define atomic64_dec(v) atomic64_sub(1, v)
10584+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10585+{
10586+ atomic64_sub_unchecked(1, v);
10587+}
10588
10589 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10590 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10591
10592 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10593+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10594+{
10595+ return cmpxchg(&v->counter, old, new);
10596+}
10597 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10598+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10599+{
10600+ return xchg(&v->counter, new);
10601+}
10602
10603 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10604 {
10605- int c, old;
10606+ int c, old, new;
10607 c = atomic_read(v);
10608 for (;;) {
10609- if (unlikely(c == (u)))
10610+ if (unlikely(c == u))
10611 break;
10612- old = atomic_cmpxchg((v), c, c + (a));
10613+
10614+ asm volatile("addcc %2, %0, %0\n"
10615+
10616+#ifdef CONFIG_PAX_REFCOUNT
10617+ "tvs %%icc, 6\n"
10618+#endif
10619+
10620+ : "=r" (new)
10621+ : "0" (c), "ir" (a)
10622+ : "cc");
10623+
10624+ old = atomic_cmpxchg(v, c, new);
10625 if (likely(old == c))
10626 break;
10627 c = old;
10628@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10629 #define atomic64_cmpxchg(v, o, n) \
10630 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10631 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10632+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10633+{
10634+ return xchg(&v->counter, new);
10635+}
10636
10637 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10638 {
10639- long c, old;
10640+ long c, old, new;
10641 c = atomic64_read(v);
10642 for (;;) {
10643- if (unlikely(c == (u)))
10644+ if (unlikely(c == u))
10645 break;
10646- old = atomic64_cmpxchg((v), c, c + (a));
10647+
10648+ asm volatile("addcc %2, %0, %0\n"
10649+
10650+#ifdef CONFIG_PAX_REFCOUNT
10651+ "tvs %%xcc, 6\n"
10652+#endif
10653+
10654+ : "=r" (new)
10655+ : "0" (c), "ir" (a)
10656+ : "cc");
10657+
10658+ old = atomic64_cmpxchg(v, c, new);
10659 if (likely(old == c))
10660 break;
10661 c = old;
10662 }
10663- return c != (u);
10664+ return c != u;
10665 }
10666
10667 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10668diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10669index 305dcc3..7835030 100644
10670--- a/arch/sparc/include/asm/barrier_64.h
10671+++ b/arch/sparc/include/asm/barrier_64.h
10672@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10673 do { \
10674 compiletime_assert_atomic_type(*p); \
10675 barrier(); \
10676- ACCESS_ONCE(*p) = (v); \
10677+ ACCESS_ONCE_RW(*p) = (v); \
10678 } while (0)
10679
10680 #define smp_load_acquire(p) \
10681diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10682index 5bb6991..5c2132e 100644
10683--- a/arch/sparc/include/asm/cache.h
10684+++ b/arch/sparc/include/asm/cache.h
10685@@ -7,10 +7,12 @@
10686 #ifndef _SPARC_CACHE_H
10687 #define _SPARC_CACHE_H
10688
10689+#include <linux/const.h>
10690+
10691 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10692
10693 #define L1_CACHE_SHIFT 5
10694-#define L1_CACHE_BYTES 32
10695+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10696
10697 #ifdef CONFIG_SPARC32
10698 #define SMP_CACHE_BYTES_SHIFT 5
10699diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10700index a24e41f..47677ff 100644
10701--- a/arch/sparc/include/asm/elf_32.h
10702+++ b/arch/sparc/include/asm/elf_32.h
10703@@ -114,6 +114,13 @@ typedef struct {
10704
10705 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10706
10707+#ifdef CONFIG_PAX_ASLR
10708+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10709+
10710+#define PAX_DELTA_MMAP_LEN 16
10711+#define PAX_DELTA_STACK_LEN 16
10712+#endif
10713+
10714 /* This yields a mask that user programs can use to figure out what
10715 instruction set this cpu supports. This can NOT be done in userspace
10716 on Sparc. */
10717diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10718index 370ca1e..d4f4a98 100644
10719--- a/arch/sparc/include/asm/elf_64.h
10720+++ b/arch/sparc/include/asm/elf_64.h
10721@@ -189,6 +189,13 @@ typedef struct {
10722 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10723 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10724
10725+#ifdef CONFIG_PAX_ASLR
10726+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10727+
10728+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10729+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10730+#endif
10731+
10732 extern unsigned long sparc64_elf_hwcap;
10733 #define ELF_HWCAP sparc64_elf_hwcap
10734
10735diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10736index a3890da..f6a408e 100644
10737--- a/arch/sparc/include/asm/pgalloc_32.h
10738+++ b/arch/sparc/include/asm/pgalloc_32.h
10739@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10740 }
10741
10742 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10743+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10744
10745 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10746 unsigned long address)
10747diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10748index 5e31871..b71c9d7 100644
10749--- a/arch/sparc/include/asm/pgalloc_64.h
10750+++ b/arch/sparc/include/asm/pgalloc_64.h
10751@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10752 }
10753
10754 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10755+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10756
10757 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10758 {
10759diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10760index 59ba6f6..4518128 100644
10761--- a/arch/sparc/include/asm/pgtable.h
10762+++ b/arch/sparc/include/asm/pgtable.h
10763@@ -5,4 +5,8 @@
10764 #else
10765 #include <asm/pgtable_32.h>
10766 #endif
10767+
10768+#define ktla_ktva(addr) (addr)
10769+#define ktva_ktla(addr) (addr)
10770+
10771 #endif
10772diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10773index b9b91ae..950b91e 100644
10774--- a/arch/sparc/include/asm/pgtable_32.h
10775+++ b/arch/sparc/include/asm/pgtable_32.h
10776@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10777 #define PAGE_SHARED SRMMU_PAGE_SHARED
10778 #define PAGE_COPY SRMMU_PAGE_COPY
10779 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10780+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10781+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10782+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10783 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10784
10785 /* Top-level page directory - dummy used by init-mm.
10786@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10787
10788 /* xwr */
10789 #define __P000 PAGE_NONE
10790-#define __P001 PAGE_READONLY
10791-#define __P010 PAGE_COPY
10792-#define __P011 PAGE_COPY
10793+#define __P001 PAGE_READONLY_NOEXEC
10794+#define __P010 PAGE_COPY_NOEXEC
10795+#define __P011 PAGE_COPY_NOEXEC
10796 #define __P100 PAGE_READONLY
10797 #define __P101 PAGE_READONLY
10798 #define __P110 PAGE_COPY
10799 #define __P111 PAGE_COPY
10800
10801 #define __S000 PAGE_NONE
10802-#define __S001 PAGE_READONLY
10803-#define __S010 PAGE_SHARED
10804-#define __S011 PAGE_SHARED
10805+#define __S001 PAGE_READONLY_NOEXEC
10806+#define __S010 PAGE_SHARED_NOEXEC
10807+#define __S011 PAGE_SHARED_NOEXEC
10808 #define __S100 PAGE_READONLY
10809 #define __S101 PAGE_READONLY
10810 #define __S110 PAGE_SHARED
10811diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10812index 79da178..c2eede8 100644
10813--- a/arch/sparc/include/asm/pgtsrmmu.h
10814+++ b/arch/sparc/include/asm/pgtsrmmu.h
10815@@ -115,6 +115,11 @@
10816 SRMMU_EXEC | SRMMU_REF)
10817 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10818 SRMMU_EXEC | SRMMU_REF)
10819+
10820+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10821+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10822+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10823+
10824 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10825 SRMMU_DIRTY | SRMMU_REF)
10826
10827diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10828index 29d64b1..4272fe8 100644
10829--- a/arch/sparc/include/asm/setup.h
10830+++ b/arch/sparc/include/asm/setup.h
10831@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10832 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10833
10834 /* init_64.c */
10835-extern atomic_t dcpage_flushes;
10836-extern atomic_t dcpage_flushes_xcall;
10837+extern atomic_unchecked_t dcpage_flushes;
10838+extern atomic_unchecked_t dcpage_flushes_xcall;
10839
10840 extern int sysctl_tsb_ratio;
10841 #endif
10842diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10843index 9689176..63c18ea 100644
10844--- a/arch/sparc/include/asm/spinlock_64.h
10845+++ b/arch/sparc/include/asm/spinlock_64.h
10846@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10847
10848 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10849
10850-static void inline arch_read_lock(arch_rwlock_t *lock)
10851+static inline void arch_read_lock(arch_rwlock_t *lock)
10852 {
10853 unsigned long tmp1, tmp2;
10854
10855 __asm__ __volatile__ (
10856 "1: ldsw [%2], %0\n"
10857 " brlz,pn %0, 2f\n"
10858-"4: add %0, 1, %1\n"
10859+"4: addcc %0, 1, %1\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
10862+" tvs %%icc, 6\n"
10863+#endif
10864+
10865 " cas [%2], %0, %1\n"
10866 " cmp %0, %1\n"
10867 " bne,pn %%icc, 1b\n"
10868@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10869 " .previous"
10870 : "=&r" (tmp1), "=&r" (tmp2)
10871 : "r" (lock)
10872- : "memory");
10873+ : "memory", "cc");
10874 }
10875
10876-static int inline arch_read_trylock(arch_rwlock_t *lock)
10877+static inline int arch_read_trylock(arch_rwlock_t *lock)
10878 {
10879 int tmp1, tmp2;
10880
10881@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10882 "1: ldsw [%2], %0\n"
10883 " brlz,a,pn %0, 2f\n"
10884 " mov 0, %0\n"
10885-" add %0, 1, %1\n"
10886+" addcc %0, 1, %1\n"
10887+
10888+#ifdef CONFIG_PAX_REFCOUNT
10889+" tvs %%icc, 6\n"
10890+#endif
10891+
10892 " cas [%2], %0, %1\n"
10893 " cmp %0, %1\n"
10894 " bne,pn %%icc, 1b\n"
10895@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10896 return tmp1;
10897 }
10898
10899-static void inline arch_read_unlock(arch_rwlock_t *lock)
10900+static inline void arch_read_unlock(arch_rwlock_t *lock)
10901 {
10902 unsigned long tmp1, tmp2;
10903
10904 __asm__ __volatile__(
10905 "1: lduw [%2], %0\n"
10906-" sub %0, 1, %1\n"
10907+" subcc %0, 1, %1\n"
10908+
10909+#ifdef CONFIG_PAX_REFCOUNT
10910+" tvs %%icc, 6\n"
10911+#endif
10912+
10913 " cas [%2], %0, %1\n"
10914 " cmp %0, %1\n"
10915 " bne,pn %%xcc, 1b\n"
10916@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10917 : "memory");
10918 }
10919
10920-static void inline arch_write_lock(arch_rwlock_t *lock)
10921+static inline void arch_write_lock(arch_rwlock_t *lock)
10922 {
10923 unsigned long mask, tmp1, tmp2;
10924
10925@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10926 : "memory");
10927 }
10928
10929-static void inline arch_write_unlock(arch_rwlock_t *lock)
10930+static inline void arch_write_unlock(arch_rwlock_t *lock)
10931 {
10932 __asm__ __volatile__(
10933 " stw %%g0, [%0]"
10934@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10935 : "memory");
10936 }
10937
10938-static int inline arch_write_trylock(arch_rwlock_t *lock)
10939+static inline int arch_write_trylock(arch_rwlock_t *lock)
10940 {
10941 unsigned long mask, tmp1, tmp2, result;
10942
10943diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10944index 96efa7a..16858bf 100644
10945--- a/arch/sparc/include/asm/thread_info_32.h
10946+++ b/arch/sparc/include/asm/thread_info_32.h
10947@@ -49,6 +49,8 @@ struct thread_info {
10948 unsigned long w_saved;
10949
10950 struct restart_block restart_block;
10951+
10952+ unsigned long lowest_stack;
10953 };
10954
10955 /*
10956diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10957index cc6275c..7eb8e21 100644
10958--- a/arch/sparc/include/asm/thread_info_64.h
10959+++ b/arch/sparc/include/asm/thread_info_64.h
10960@@ -63,6 +63,8 @@ struct thread_info {
10961 struct pt_regs *kern_una_regs;
10962 unsigned int kern_una_insn;
10963
10964+ unsigned long lowest_stack;
10965+
10966 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10967 __attribute__ ((aligned(64)));
10968 };
10969@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10970 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10971 /* flag bit 4 is available */
10972 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10973-/* flag bit 6 is available */
10974+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10975 #define TIF_32BIT 7 /* 32-bit binary */
10976 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10977 #define TIF_SECCOMP 9 /* secure computing */
10978 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10979 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10980+
10981 /* NOTE: Thread flags >= 12 should be ones we have no interest
10982 * in using in assembly, else we can't use the mask as
10983 * an immediate value in instructions such as andcc.
10984@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
10985 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10986 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10987 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10988+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10989
10990 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10991 _TIF_DO_NOTIFY_RESUME_MASK | \
10992 _TIF_NEED_RESCHED)
10993 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10994
10995+#define _TIF_WORK_SYSCALL \
10996+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10997+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10998+
10999+
11000 /*
11001 * Thread-synchronous status.
11002 *
11003diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
11004index bd56c28..4b63d83 100644
11005--- a/arch/sparc/include/asm/uaccess.h
11006+++ b/arch/sparc/include/asm/uaccess.h
11007@@ -1,5 +1,6 @@
11008 #ifndef ___ASM_SPARC_UACCESS_H
11009 #define ___ASM_SPARC_UACCESS_H
11010+
11011 #if defined(__sparc__) && defined(__arch64__)
11012 #include <asm/uaccess_64.h>
11013 #else
11014diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
11015index 9634d08..f55fe4f 100644
11016--- a/arch/sparc/include/asm/uaccess_32.h
11017+++ b/arch/sparc/include/asm/uaccess_32.h
11018@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
11019
11020 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
11021 {
11022- if (n && __access_ok((unsigned long) to, n))
11023+ if ((long)n < 0)
11024+ return n;
11025+
11026+ if (n && __access_ok((unsigned long) to, n)) {
11027+ if (!__builtin_constant_p(n))
11028+ check_object_size(from, n, true);
11029 return __copy_user(to, (__force void __user *) from, n);
11030- else
11031+ } else
11032 return n;
11033 }
11034
11035 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
11036 {
11037+ if ((long)n < 0)
11038+ return n;
11039+
11040+ if (!__builtin_constant_p(n))
11041+ check_object_size(from, n, true);
11042+
11043 return __copy_user(to, (__force void __user *) from, n);
11044 }
11045
11046 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
11047 {
11048- if (n && __access_ok((unsigned long) from, n))
11049+ if ((long)n < 0)
11050+ return n;
11051+
11052+ if (n && __access_ok((unsigned long) from, n)) {
11053+ if (!__builtin_constant_p(n))
11054+ check_object_size(to, n, false);
11055 return __copy_user((__force void __user *) to, from, n);
11056- else
11057+ } else
11058 return n;
11059 }
11060
11061 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
11062 {
11063+ if ((long)n < 0)
11064+ return n;
11065+
11066 return __copy_user((__force void __user *) to, from, n);
11067 }
11068
11069diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
11070index c990a5e..f17b9c1 100644
11071--- a/arch/sparc/include/asm/uaccess_64.h
11072+++ b/arch/sparc/include/asm/uaccess_64.h
11073@@ -10,6 +10,7 @@
11074 #include <linux/compiler.h>
11075 #include <linux/string.h>
11076 #include <linux/thread_info.h>
11077+#include <linux/kernel.h>
11078 #include <asm/asi.h>
11079 #include <asm/spitfire.h>
11080 #include <asm-generic/uaccess-unaligned.h>
11081@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
11082 static inline unsigned long __must_check
11083 copy_from_user(void *to, const void __user *from, unsigned long size)
11084 {
11085- unsigned long ret = ___copy_from_user(to, from, size);
11086+ unsigned long ret;
11087
11088+ if ((long)size < 0 || size > INT_MAX)
11089+ return size;
11090+
11091+ if (!__builtin_constant_p(size))
11092+ check_object_size(to, size, false);
11093+
11094+ ret = ___copy_from_user(to, from, size);
11095 if (unlikely(ret))
11096 ret = copy_from_user_fixup(to, from, size);
11097
11098@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
11099 static inline unsigned long __must_check
11100 copy_to_user(void __user *to, const void *from, unsigned long size)
11101 {
11102- unsigned long ret = ___copy_to_user(to, from, size);
11103+ unsigned long ret;
11104
11105+ if ((long)size < 0 || size > INT_MAX)
11106+ return size;
11107+
11108+ if (!__builtin_constant_p(size))
11109+ check_object_size(from, size, true);
11110+
11111+ ret = ___copy_to_user(to, from, size);
11112 if (unlikely(ret))
11113 ret = copy_to_user_fixup(to, from, size);
11114 return ret;
11115diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
11116index 7cf9c6e..6206648 100644
11117--- a/arch/sparc/kernel/Makefile
11118+++ b/arch/sparc/kernel/Makefile
11119@@ -4,7 +4,7 @@
11120 #
11121
11122 asflags-y := -ansi
11123-ccflags-y := -Werror
11124+#ccflags-y := -Werror
11125
11126 extra-y := head_$(BITS).o
11127
11128diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
11129index 50e7b62..79fae35 100644
11130--- a/arch/sparc/kernel/process_32.c
11131+++ b/arch/sparc/kernel/process_32.c
11132@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
11133
11134 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
11135 r->psr, r->pc, r->npc, r->y, print_tainted());
11136- printk("PC: <%pS>\n", (void *) r->pc);
11137+ printk("PC: <%pA>\n", (void *) r->pc);
11138 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11139 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
11140 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
11141 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11142 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
11143 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
11144- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
11145+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
11146
11147 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11148 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
11149@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11150 rw = (struct reg_window32 *) fp;
11151 pc = rw->ins[7];
11152 printk("[%08lx : ", pc);
11153- printk("%pS ] ", (void *) pc);
11154+ printk("%pA ] ", (void *) pc);
11155 fp = rw->ins[6];
11156 } while (++count < 16);
11157 printk("\n");
11158diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
11159index 0be7bf9..2b1cba8 100644
11160--- a/arch/sparc/kernel/process_64.c
11161+++ b/arch/sparc/kernel/process_64.c
11162@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
11163 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
11164 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
11165 if (regs->tstate & TSTATE_PRIV)
11166- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
11167+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
11168 }
11169
11170 void show_regs(struct pt_regs *regs)
11171@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
11172
11173 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
11174 regs->tpc, regs->tnpc, regs->y, print_tainted());
11175- printk("TPC: <%pS>\n", (void *) regs->tpc);
11176+ printk("TPC: <%pA>\n", (void *) regs->tpc);
11177 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
11178 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
11179 regs->u_regs[3]);
11180@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
11181 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
11182 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
11183 regs->u_regs[15]);
11184- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
11185+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
11186 show_regwindow(regs);
11187 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
11188 }
11189@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
11190 ((tp && tp->task) ? tp->task->pid : -1));
11191
11192 if (gp->tstate & TSTATE_PRIV) {
11193- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
11194+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
11195 (void *) gp->tpc,
11196 (void *) gp->o7,
11197 (void *) gp->i7,
11198diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
11199index 79cc0d1..ec62734 100644
11200--- a/arch/sparc/kernel/prom_common.c
11201+++ b/arch/sparc/kernel/prom_common.c
11202@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
11203
11204 unsigned int prom_early_allocated __initdata;
11205
11206-static struct of_pdt_ops prom_sparc_ops __initdata = {
11207+static struct of_pdt_ops prom_sparc_ops __initconst = {
11208 .nextprop = prom_common_nextprop,
11209 .getproplen = prom_getproplen,
11210 .getproperty = prom_getproperty,
11211diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
11212index c13c9f2..d572c34 100644
11213--- a/arch/sparc/kernel/ptrace_64.c
11214+++ b/arch/sparc/kernel/ptrace_64.c
11215@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
11216 return ret;
11217 }
11218
11219+#ifdef CONFIG_GRKERNSEC_SETXID
11220+extern void gr_delayed_cred_worker(void);
11221+#endif
11222+
11223 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11224 {
11225 int ret = 0;
11226@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11227 if (test_thread_flag(TIF_NOHZ))
11228 user_exit();
11229
11230+#ifdef CONFIG_GRKERNSEC_SETXID
11231+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11232+ gr_delayed_cred_worker();
11233+#endif
11234+
11235 if (test_thread_flag(TIF_SYSCALL_TRACE))
11236 ret = tracehook_report_syscall_entry(regs);
11237
11238@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
11239 if (test_thread_flag(TIF_NOHZ))
11240 user_exit();
11241
11242+#ifdef CONFIG_GRKERNSEC_SETXID
11243+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11244+ gr_delayed_cred_worker();
11245+#endif
11246+
11247 audit_syscall_exit(regs);
11248
11249 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
11250diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
11251index c9300bf..b2080cf 100644
11252--- a/arch/sparc/kernel/smp_64.c
11253+++ b/arch/sparc/kernel/smp_64.c
11254@@ -883,7 +883,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11255 return;
11256
11257 #ifdef CONFIG_DEBUG_DCFLUSH
11258- atomic_inc(&dcpage_flushes);
11259+ atomic_inc_unchecked(&dcpage_flushes);
11260 #endif
11261
11262 this_cpu = get_cpu();
11263@@ -907,7 +907,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11264 xcall_deliver(data0, __pa(pg_addr),
11265 (u64) pg_addr, cpumask_of(cpu));
11266 #ifdef CONFIG_DEBUG_DCFLUSH
11267- atomic_inc(&dcpage_flushes_xcall);
11268+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11269 #endif
11270 }
11271 }
11272@@ -926,7 +926,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11273 preempt_disable();
11274
11275 #ifdef CONFIG_DEBUG_DCFLUSH
11276- atomic_inc(&dcpage_flushes);
11277+ atomic_inc_unchecked(&dcpage_flushes);
11278 #endif
11279 data0 = 0;
11280 pg_addr = page_address(page);
11281@@ -943,7 +943,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11282 xcall_deliver(data0, __pa(pg_addr),
11283 (u64) pg_addr, cpu_online_mask);
11284 #ifdef CONFIG_DEBUG_DCFLUSH
11285- atomic_inc(&dcpage_flushes_xcall);
11286+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11287 #endif
11288 }
11289 __local_flush_dcache_page(page);
11290diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
11291index 646988d..b88905f 100644
11292--- a/arch/sparc/kernel/sys_sparc_32.c
11293+++ b/arch/sparc/kernel/sys_sparc_32.c
11294@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11295 if (len > TASK_SIZE - PAGE_SIZE)
11296 return -ENOMEM;
11297 if (!addr)
11298- addr = TASK_UNMAPPED_BASE;
11299+ addr = current->mm->mmap_base;
11300
11301 info.flags = 0;
11302 info.length = len;
11303diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
11304index c85403d..6af95c9 100644
11305--- a/arch/sparc/kernel/sys_sparc_64.c
11306+++ b/arch/sparc/kernel/sys_sparc_64.c
11307@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11308 struct vm_area_struct * vma;
11309 unsigned long task_size = TASK_SIZE;
11310 int do_color_align;
11311+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11312 struct vm_unmapped_area_info info;
11313
11314 if (flags & MAP_FIXED) {
11315 /* We do not accept a shared mapping if it would violate
11316 * cache aliasing constraints.
11317 */
11318- if ((flags & MAP_SHARED) &&
11319+ if ((filp || (flags & MAP_SHARED)) &&
11320 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11321 return -EINVAL;
11322 return addr;
11323@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11324 if (filp || (flags & MAP_SHARED))
11325 do_color_align = 1;
11326
11327+#ifdef CONFIG_PAX_RANDMMAP
11328+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11329+#endif
11330+
11331 if (addr) {
11332 if (do_color_align)
11333 addr = COLOR_ALIGN(addr, pgoff);
11334@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11335 addr = PAGE_ALIGN(addr);
11336
11337 vma = find_vma(mm, addr);
11338- if (task_size - len >= addr &&
11339- (!vma || addr + len <= vma->vm_start))
11340+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11341 return addr;
11342 }
11343
11344 info.flags = 0;
11345 info.length = len;
11346- info.low_limit = TASK_UNMAPPED_BASE;
11347+ info.low_limit = mm->mmap_base;
11348 info.high_limit = min(task_size, VA_EXCLUDE_START);
11349 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11350 info.align_offset = pgoff << PAGE_SHIFT;
11351+ info.threadstack_offset = offset;
11352 addr = vm_unmapped_area(&info);
11353
11354 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11355 VM_BUG_ON(addr != -ENOMEM);
11356 info.low_limit = VA_EXCLUDE_END;
11357+
11358+#ifdef CONFIG_PAX_RANDMMAP
11359+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11360+ info.low_limit += mm->delta_mmap;
11361+#endif
11362+
11363 info.high_limit = task_size;
11364 addr = vm_unmapped_area(&info);
11365 }
11366@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11367 unsigned long task_size = STACK_TOP32;
11368 unsigned long addr = addr0;
11369 int do_color_align;
11370+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11371 struct vm_unmapped_area_info info;
11372
11373 /* This should only ever run for 32-bit processes. */
11374@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11375 /* We do not accept a shared mapping if it would violate
11376 * cache aliasing constraints.
11377 */
11378- if ((flags & MAP_SHARED) &&
11379+ if ((filp || (flags & MAP_SHARED)) &&
11380 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11381 return -EINVAL;
11382 return addr;
11383@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11384 if (filp || (flags & MAP_SHARED))
11385 do_color_align = 1;
11386
11387+#ifdef CONFIG_PAX_RANDMMAP
11388+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11389+#endif
11390+
11391 /* requesting a specific address */
11392 if (addr) {
11393 if (do_color_align)
11394@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11395 addr = PAGE_ALIGN(addr);
11396
11397 vma = find_vma(mm, addr);
11398- if (task_size - len >= addr &&
11399- (!vma || addr + len <= vma->vm_start))
11400+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11401 return addr;
11402 }
11403
11404@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11405 info.high_limit = mm->mmap_base;
11406 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11407 info.align_offset = pgoff << PAGE_SHIFT;
11408+ info.threadstack_offset = offset;
11409 addr = vm_unmapped_area(&info);
11410
11411 /*
11412@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11413 VM_BUG_ON(addr != -ENOMEM);
11414 info.flags = 0;
11415 info.low_limit = TASK_UNMAPPED_BASE;
11416+
11417+#ifdef CONFIG_PAX_RANDMMAP
11418+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11419+ info.low_limit += mm->delta_mmap;
11420+#endif
11421+
11422 info.high_limit = STACK_TOP32;
11423 addr = vm_unmapped_area(&info);
11424 }
11425@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
11426 EXPORT_SYMBOL(get_fb_unmapped_area);
11427
11428 /* Essentially the same as PowerPC. */
11429-static unsigned long mmap_rnd(void)
11430+static unsigned long mmap_rnd(struct mm_struct *mm)
11431 {
11432 unsigned long rnd = 0UL;
11433
11434+#ifdef CONFIG_PAX_RANDMMAP
11435+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11436+#endif
11437+
11438 if (current->flags & PF_RANDOMIZE) {
11439 unsigned long val = get_random_int();
11440 if (test_thread_flag(TIF_32BIT))
11441@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11442
11443 void arch_pick_mmap_layout(struct mm_struct *mm)
11444 {
11445- unsigned long random_factor = mmap_rnd();
11446+ unsigned long random_factor = mmap_rnd(mm);
11447 unsigned long gap;
11448
11449 /*
11450@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11451 gap == RLIM_INFINITY ||
11452 sysctl_legacy_va_layout) {
11453 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11454+
11455+#ifdef CONFIG_PAX_RANDMMAP
11456+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11457+ mm->mmap_base += mm->delta_mmap;
11458+#endif
11459+
11460 mm->get_unmapped_area = arch_get_unmapped_area;
11461 } else {
11462 /* We know it's 32-bit */
11463@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11464 gap = (task_size / 6 * 5);
11465
11466 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11467+
11468+#ifdef CONFIG_PAX_RANDMMAP
11469+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11470+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11471+#endif
11472+
11473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11474 }
11475 }
11476diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11477index 33a17e7..d87fb1f 100644
11478--- a/arch/sparc/kernel/syscalls.S
11479+++ b/arch/sparc/kernel/syscalls.S
11480@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11481 #endif
11482 .align 32
11483 1: ldx [%g6 + TI_FLAGS], %l5
11484- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11485+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11486 be,pt %icc, rtrap
11487 nop
11488 call syscall_trace_leave
11489@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11490
11491 srl %i3, 0, %o3 ! IEU0
11492 srl %i2, 0, %o2 ! IEU0 Group
11493- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11494+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11495 bne,pn %icc, linux_syscall_trace32 ! CTI
11496 mov %i0, %l5 ! IEU1
11497 5: call %l7 ! CTI Group brk forced
11498@@ -208,7 +208,7 @@ linux_sparc_syscall:
11499
11500 mov %i3, %o3 ! IEU1
11501 mov %i4, %o4 ! IEU0 Group
11502- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11503+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11504 bne,pn %icc, linux_syscall_trace ! CTI Group
11505 mov %i0, %l5 ! IEU0
11506 2: call %l7 ! CTI Group brk forced
11507@@ -223,7 +223,7 @@ ret_sys_call:
11508
11509 cmp %o0, -ERESTART_RESTARTBLOCK
11510 bgeu,pn %xcc, 1f
11511- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11512+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11513 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11514
11515 2:
11516diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11517index 6fd386c5..6907d81 100644
11518--- a/arch/sparc/kernel/traps_32.c
11519+++ b/arch/sparc/kernel/traps_32.c
11520@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11521 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11522 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11523
11524+extern void gr_handle_kernel_exploit(void);
11525+
11526 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11527 {
11528 static int die_counter;
11529@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11530 count++ < 30 &&
11531 (((unsigned long) rw) >= PAGE_OFFSET) &&
11532 !(((unsigned long) rw) & 0x7)) {
11533- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11534+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11535 (void *) rw->ins[7]);
11536 rw = (struct reg_window32 *)rw->ins[6];
11537 }
11538 }
11539 printk("Instruction DUMP:");
11540 instruction_dump ((unsigned long *) regs->pc);
11541- if(regs->psr & PSR_PS)
11542+ if(regs->psr & PSR_PS) {
11543+ gr_handle_kernel_exploit();
11544 do_exit(SIGKILL);
11545+ }
11546 do_exit(SIGSEGV);
11547 }
11548
11549diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11550index 981a769..d906eda 100644
11551--- a/arch/sparc/kernel/traps_64.c
11552+++ b/arch/sparc/kernel/traps_64.c
11553@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11554 i + 1,
11555 p->trapstack[i].tstate, p->trapstack[i].tpc,
11556 p->trapstack[i].tnpc, p->trapstack[i].tt);
11557- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11558+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11559 }
11560 }
11561
11562@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11563
11564 lvl -= 0x100;
11565 if (regs->tstate & TSTATE_PRIV) {
11566+
11567+#ifdef CONFIG_PAX_REFCOUNT
11568+ if (lvl == 6)
11569+ pax_report_refcount_overflow(regs);
11570+#endif
11571+
11572 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11573 die_if_kernel(buffer, regs);
11574 }
11575@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11576 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11577 {
11578 char buffer[32];
11579-
11580+
11581 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11582 0, lvl, SIGTRAP) == NOTIFY_STOP)
11583 return;
11584
11585+#ifdef CONFIG_PAX_REFCOUNT
11586+ if (lvl == 6)
11587+ pax_report_refcount_overflow(regs);
11588+#endif
11589+
11590 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11591
11592 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11593@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11594 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11595 printk("%s" "ERROR(%d): ",
11596 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11597- printk("TPC<%pS>\n", (void *) regs->tpc);
11598+ printk("TPC<%pA>\n", (void *) regs->tpc);
11599 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11600 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11601 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11602@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11603 smp_processor_id(),
11604 (type & 0x1) ? 'I' : 'D',
11605 regs->tpc);
11606- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11607+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11608 panic("Irrecoverable Cheetah+ parity error.");
11609 }
11610
11611@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11612 smp_processor_id(),
11613 (type & 0x1) ? 'I' : 'D',
11614 regs->tpc);
11615- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11616+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11617 }
11618
11619 struct sun4v_error_entry {
11620@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11621 /*0x38*/u64 reserved_5;
11622 };
11623
11624-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11625-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11626+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11627+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11628
11629 static const char *sun4v_err_type_to_str(u8 type)
11630 {
11631@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11632 }
11633
11634 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11635- int cpu, const char *pfx, atomic_t *ocnt)
11636+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11637 {
11638 u64 *raw_ptr = (u64 *) ent;
11639 u32 attrs;
11640@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11641
11642 show_regs(regs);
11643
11644- if ((cnt = atomic_read(ocnt)) != 0) {
11645- atomic_set(ocnt, 0);
11646+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11647+ atomic_set_unchecked(ocnt, 0);
11648 wmb();
11649 printk("%s: Queue overflowed %d times.\n",
11650 pfx, cnt);
11651@@ -2048,7 +2059,7 @@ out:
11652 */
11653 void sun4v_resum_overflow(struct pt_regs *regs)
11654 {
11655- atomic_inc(&sun4v_resum_oflow_cnt);
11656+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11657 }
11658
11659 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11660@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11661 /* XXX Actually even this can make not that much sense. Perhaps
11662 * XXX we should just pull the plug and panic directly from here?
11663 */
11664- atomic_inc(&sun4v_nonresum_oflow_cnt);
11665+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11666 }
11667
11668 static void sun4v_tlb_error(struct pt_regs *regs)
11669@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11670
11671 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11672 regs->tpc, tl);
11673- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11674+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11675 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11676- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11677+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11678 (void *) regs->u_regs[UREG_I7]);
11679 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11680 "pte[%lx] error[%lx]\n",
11681@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11682
11683 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11684 regs->tpc, tl);
11685- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11686+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11687 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11688- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11689+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11690 (void *) regs->u_regs[UREG_I7]);
11691 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11692 "pte[%lx] error[%lx]\n",
11693@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11694 fp = (unsigned long)sf->fp + STACK_BIAS;
11695 }
11696
11697- printk(" [%016lx] %pS\n", pc, (void *) pc);
11698+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11699 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11700 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11701 int index = tsk->curr_ret_stack;
11702 if (tsk->ret_stack && index >= graph) {
11703 pc = tsk->ret_stack[index - graph].ret;
11704- printk(" [%016lx] %pS\n", pc, (void *) pc);
11705+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11706 graph++;
11707 }
11708 }
11709@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11710 return (struct reg_window *) (fp + STACK_BIAS);
11711 }
11712
11713+extern void gr_handle_kernel_exploit(void);
11714+
11715 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11716 {
11717 static int die_counter;
11718@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11719 while (rw &&
11720 count++ < 30 &&
11721 kstack_valid(tp, (unsigned long) rw)) {
11722- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11723+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11724 (void *) rw->ins[7]);
11725
11726 rw = kernel_stack_up(rw);
11727@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11728 }
11729 user_instruction_dump ((unsigned int __user *) regs->tpc);
11730 }
11731- if (regs->tstate & TSTATE_PRIV)
11732+ if (regs->tstate & TSTATE_PRIV) {
11733+ gr_handle_kernel_exploit();
11734 do_exit(SIGKILL);
11735+ }
11736 do_exit(SIGSEGV);
11737 }
11738 EXPORT_SYMBOL(die_if_kernel);
11739diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11740index 62098a8..547ab2c 100644
11741--- a/arch/sparc/kernel/unaligned_64.c
11742+++ b/arch/sparc/kernel/unaligned_64.c
11743@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11744 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11745
11746 if (__ratelimit(&ratelimit)) {
11747- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11748+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11749 regs->tpc, (void *) regs->tpc);
11750 }
11751 }
11752diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11753index 3269b02..64f5231 100644
11754--- a/arch/sparc/lib/Makefile
11755+++ b/arch/sparc/lib/Makefile
11756@@ -2,7 +2,7 @@
11757 #
11758
11759 asflags-y := -ansi -DST_DIV0=0x02
11760-ccflags-y := -Werror
11761+#ccflags-y := -Werror
11762
11763 lib-$(CONFIG_SPARC32) += ashrdi3.o
11764 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11765diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11766index 85c233d..68500e0 100644
11767--- a/arch/sparc/lib/atomic_64.S
11768+++ b/arch/sparc/lib/atomic_64.S
11769@@ -17,7 +17,12 @@
11770 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11771 BACKOFF_SETUP(%o2)
11772 1: lduw [%o1], %g1
11773- add %g1, %o0, %g7
11774+ addcc %g1, %o0, %g7
11775+
11776+#ifdef CONFIG_PAX_REFCOUNT
11777+ tvs %icc, 6
11778+#endif
11779+
11780 cas [%o1], %g1, %g7
11781 cmp %g1, %g7
11782 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11783@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11784 2: BACKOFF_SPIN(%o2, %o3, 1b)
11785 ENDPROC(atomic_add)
11786
11787+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11788+ BACKOFF_SETUP(%o2)
11789+1: lduw [%o1], %g1
11790+ add %g1, %o0, %g7
11791+ cas [%o1], %g1, %g7
11792+ cmp %g1, %g7
11793+ bne,pn %icc, 2f
11794+ nop
11795+ retl
11796+ nop
11797+2: BACKOFF_SPIN(%o2, %o3, 1b)
11798+ENDPROC(atomic_add_unchecked)
11799+
11800 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11801 BACKOFF_SETUP(%o2)
11802 1: lduw [%o1], %g1
11803- sub %g1, %o0, %g7
11804+ subcc %g1, %o0, %g7
11805+
11806+#ifdef CONFIG_PAX_REFCOUNT
11807+ tvs %icc, 6
11808+#endif
11809+
11810 cas [%o1], %g1, %g7
11811 cmp %g1, %g7
11812 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11813@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11814 2: BACKOFF_SPIN(%o2, %o3, 1b)
11815 ENDPROC(atomic_sub)
11816
11817+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11818+ BACKOFF_SETUP(%o2)
11819+1: lduw [%o1], %g1
11820+ sub %g1, %o0, %g7
11821+ cas [%o1], %g1, %g7
11822+ cmp %g1, %g7
11823+ bne,pn %icc, 2f
11824+ nop
11825+ retl
11826+ nop
11827+2: BACKOFF_SPIN(%o2, %o3, 1b)
11828+ENDPROC(atomic_sub_unchecked)
11829+
11830 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11831 BACKOFF_SETUP(%o2)
11832 1: lduw [%o1], %g1
11833- add %g1, %o0, %g7
11834+ addcc %g1, %o0, %g7
11835+
11836+#ifdef CONFIG_PAX_REFCOUNT
11837+ tvs %icc, 6
11838+#endif
11839+
11840 cas [%o1], %g1, %g7
11841 cmp %g1, %g7
11842 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11843@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11844 2: BACKOFF_SPIN(%o2, %o3, 1b)
11845 ENDPROC(atomic_add_ret)
11846
11847+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11848+ BACKOFF_SETUP(%o2)
11849+1: lduw [%o1], %g1
11850+ addcc %g1, %o0, %g7
11851+ cas [%o1], %g1, %g7
11852+ cmp %g1, %g7
11853+ bne,pn %icc, 2f
11854+ add %g7, %o0, %g7
11855+ sra %g7, 0, %o0
11856+ retl
11857+ nop
11858+2: BACKOFF_SPIN(%o2, %o3, 1b)
11859+ENDPROC(atomic_add_ret_unchecked)
11860+
11861 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11862 BACKOFF_SETUP(%o2)
11863 1: lduw [%o1], %g1
11864- sub %g1, %o0, %g7
11865+ subcc %g1, %o0, %g7
11866+
11867+#ifdef CONFIG_PAX_REFCOUNT
11868+ tvs %icc, 6
11869+#endif
11870+
11871 cas [%o1], %g1, %g7
11872 cmp %g1, %g7
11873 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11874@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11875 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11876 BACKOFF_SETUP(%o2)
11877 1: ldx [%o1], %g1
11878- add %g1, %o0, %g7
11879+ addcc %g1, %o0, %g7
11880+
11881+#ifdef CONFIG_PAX_REFCOUNT
11882+ tvs %xcc, 6
11883+#endif
11884+
11885 casx [%o1], %g1, %g7
11886 cmp %g1, %g7
11887 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11888@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11889 2: BACKOFF_SPIN(%o2, %o3, 1b)
11890 ENDPROC(atomic64_add)
11891
11892+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11893+ BACKOFF_SETUP(%o2)
11894+1: ldx [%o1], %g1
11895+ addcc %g1, %o0, %g7
11896+ casx [%o1], %g1, %g7
11897+ cmp %g1, %g7
11898+ bne,pn %xcc, 2f
11899+ nop
11900+ retl
11901+ nop
11902+2: BACKOFF_SPIN(%o2, %o3, 1b)
11903+ENDPROC(atomic64_add_unchecked)
11904+
11905 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11906 BACKOFF_SETUP(%o2)
11907 1: ldx [%o1], %g1
11908- sub %g1, %o0, %g7
11909+ subcc %g1, %o0, %g7
11910+
11911+#ifdef CONFIG_PAX_REFCOUNT
11912+ tvs %xcc, 6
11913+#endif
11914+
11915 casx [%o1], %g1, %g7
11916 cmp %g1, %g7
11917 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11918@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11919 2: BACKOFF_SPIN(%o2, %o3, 1b)
11920 ENDPROC(atomic64_sub)
11921
11922+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11923+ BACKOFF_SETUP(%o2)
11924+1: ldx [%o1], %g1
11925+ subcc %g1, %o0, %g7
11926+ casx [%o1], %g1, %g7
11927+ cmp %g1, %g7
11928+ bne,pn %xcc, 2f
11929+ nop
11930+ retl
11931+ nop
11932+2: BACKOFF_SPIN(%o2, %o3, 1b)
11933+ENDPROC(atomic64_sub_unchecked)
11934+
11935 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11936 BACKOFF_SETUP(%o2)
11937 1: ldx [%o1], %g1
11938- add %g1, %o0, %g7
11939+ addcc %g1, %o0, %g7
11940+
11941+#ifdef CONFIG_PAX_REFCOUNT
11942+ tvs %xcc, 6
11943+#endif
11944+
11945 casx [%o1], %g1, %g7
11946 cmp %g1, %g7
11947 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11948@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11949 2: BACKOFF_SPIN(%o2, %o3, 1b)
11950 ENDPROC(atomic64_add_ret)
11951
11952+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11953+ BACKOFF_SETUP(%o2)
11954+1: ldx [%o1], %g1
11955+ addcc %g1, %o0, %g7
11956+ casx [%o1], %g1, %g7
11957+ cmp %g1, %g7
11958+ bne,pn %xcc, 2f
11959+ add %g7, %o0, %g7
11960+ mov %g7, %o0
11961+ retl
11962+ nop
11963+2: BACKOFF_SPIN(%o2, %o3, 1b)
11964+ENDPROC(atomic64_add_ret_unchecked)
11965+
11966 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11967 BACKOFF_SETUP(%o2)
11968 1: ldx [%o1], %g1
11969- sub %g1, %o0, %g7
11970+ subcc %g1, %o0, %g7
11971+
11972+#ifdef CONFIG_PAX_REFCOUNT
11973+ tvs %xcc, 6
11974+#endif
11975+
11976 casx [%o1], %g1, %g7
11977 cmp %g1, %g7
11978 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11979diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11980index 323335b..ed85ea2 100644
11981--- a/arch/sparc/lib/ksyms.c
11982+++ b/arch/sparc/lib/ksyms.c
11983@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
11984
11985 /* Atomic counter implementation. */
11986 EXPORT_SYMBOL(atomic_add);
11987+EXPORT_SYMBOL(atomic_add_unchecked);
11988 EXPORT_SYMBOL(atomic_add_ret);
11989+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11990 EXPORT_SYMBOL(atomic_sub);
11991+EXPORT_SYMBOL(atomic_sub_unchecked);
11992 EXPORT_SYMBOL(atomic_sub_ret);
11993 EXPORT_SYMBOL(atomic64_add);
11994+EXPORT_SYMBOL(atomic64_add_unchecked);
11995 EXPORT_SYMBOL(atomic64_add_ret);
11996+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11997 EXPORT_SYMBOL(atomic64_sub);
11998+EXPORT_SYMBOL(atomic64_sub_unchecked);
11999 EXPORT_SYMBOL(atomic64_sub_ret);
12000 EXPORT_SYMBOL(atomic64_dec_if_positive);
12001
12002diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
12003index 30c3ecc..736f015 100644
12004--- a/arch/sparc/mm/Makefile
12005+++ b/arch/sparc/mm/Makefile
12006@@ -2,7 +2,7 @@
12007 #
12008
12009 asflags-y := -ansi
12010-ccflags-y := -Werror
12011+#ccflags-y := -Werror
12012
12013 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
12014 obj-y += fault_$(BITS).o
12015diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
12016index 908e8c1..1524793 100644
12017--- a/arch/sparc/mm/fault_32.c
12018+++ b/arch/sparc/mm/fault_32.c
12019@@ -21,6 +21,9 @@
12020 #include <linux/perf_event.h>
12021 #include <linux/interrupt.h>
12022 #include <linux/kdebug.h>
12023+#include <linux/slab.h>
12024+#include <linux/pagemap.h>
12025+#include <linux/compiler.h>
12026
12027 #include <asm/page.h>
12028 #include <asm/pgtable.h>
12029@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12030 return safe_compute_effective_address(regs, insn);
12031 }
12032
12033+#ifdef CONFIG_PAX_PAGEEXEC
12034+#ifdef CONFIG_PAX_DLRESOLVE
12035+static void pax_emuplt_close(struct vm_area_struct *vma)
12036+{
12037+ vma->vm_mm->call_dl_resolve = 0UL;
12038+}
12039+
12040+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12041+{
12042+ unsigned int *kaddr;
12043+
12044+ vmf->page = alloc_page(GFP_HIGHUSER);
12045+ if (!vmf->page)
12046+ return VM_FAULT_OOM;
12047+
12048+ kaddr = kmap(vmf->page);
12049+ memset(kaddr, 0, PAGE_SIZE);
12050+ kaddr[0] = 0x9DE3BFA8U; /* save */
12051+ flush_dcache_page(vmf->page);
12052+ kunmap(vmf->page);
12053+ return VM_FAULT_MAJOR;
12054+}
12055+
12056+static const struct vm_operations_struct pax_vm_ops = {
12057+ .close = pax_emuplt_close,
12058+ .fault = pax_emuplt_fault
12059+};
12060+
12061+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12062+{
12063+ int ret;
12064+
12065+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12066+ vma->vm_mm = current->mm;
12067+ vma->vm_start = addr;
12068+ vma->vm_end = addr + PAGE_SIZE;
12069+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12070+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12071+ vma->vm_ops = &pax_vm_ops;
12072+
12073+ ret = insert_vm_struct(current->mm, vma);
12074+ if (ret)
12075+ return ret;
12076+
12077+ ++current->mm->total_vm;
12078+ return 0;
12079+}
12080+#endif
12081+
12082+/*
12083+ * PaX: decide what to do with offenders (regs->pc = fault address)
12084+ *
12085+ * returns 1 when task should be killed
12086+ * 2 when patched PLT trampoline was detected
12087+ * 3 when unpatched PLT trampoline was detected
12088+ */
12089+static int pax_handle_fetch_fault(struct pt_regs *regs)
12090+{
12091+
12092+#ifdef CONFIG_PAX_EMUPLT
12093+ int err;
12094+
12095+ do { /* PaX: patched PLT emulation #1 */
12096+ unsigned int sethi1, sethi2, jmpl;
12097+
12098+ err = get_user(sethi1, (unsigned int *)regs->pc);
12099+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
12100+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
12101+
12102+ if (err)
12103+ break;
12104+
12105+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12106+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12107+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12108+ {
12109+ unsigned int addr;
12110+
12111+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12112+ addr = regs->u_regs[UREG_G1];
12113+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12114+ regs->pc = addr;
12115+ regs->npc = addr+4;
12116+ return 2;
12117+ }
12118+ } while (0);
12119+
12120+ do { /* PaX: patched PLT emulation #2 */
12121+ unsigned int ba;
12122+
12123+ err = get_user(ba, (unsigned int *)regs->pc);
12124+
12125+ if (err)
12126+ break;
12127+
12128+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12129+ unsigned int addr;
12130+
12131+ if ((ba & 0xFFC00000U) == 0x30800000U)
12132+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12133+ else
12134+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12135+ regs->pc = addr;
12136+ regs->npc = addr+4;
12137+ return 2;
12138+ }
12139+ } while (0);
12140+
12141+ do { /* PaX: patched PLT emulation #3 */
12142+ unsigned int sethi, bajmpl, nop;
12143+
12144+ err = get_user(sethi, (unsigned int *)regs->pc);
12145+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
12146+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12147+
12148+ if (err)
12149+ break;
12150+
12151+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12152+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12153+ nop == 0x01000000U)
12154+ {
12155+ unsigned int addr;
12156+
12157+ addr = (sethi & 0x003FFFFFU) << 10;
12158+ regs->u_regs[UREG_G1] = addr;
12159+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12160+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12161+ else
12162+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12163+ regs->pc = addr;
12164+ regs->npc = addr+4;
12165+ return 2;
12166+ }
12167+ } while (0);
12168+
12169+ do { /* PaX: unpatched PLT emulation step 1 */
12170+ unsigned int sethi, ba, nop;
12171+
12172+ err = get_user(sethi, (unsigned int *)regs->pc);
12173+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
12174+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12175+
12176+ if (err)
12177+ break;
12178+
12179+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12180+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12181+ nop == 0x01000000U)
12182+ {
12183+ unsigned int addr, save, call;
12184+
12185+ if ((ba & 0xFFC00000U) == 0x30800000U)
12186+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12187+ else
12188+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12189+
12190+ err = get_user(save, (unsigned int *)addr);
12191+ err |= get_user(call, (unsigned int *)(addr+4));
12192+ err |= get_user(nop, (unsigned int *)(addr+8));
12193+ if (err)
12194+ break;
12195+
12196+#ifdef CONFIG_PAX_DLRESOLVE
12197+ if (save == 0x9DE3BFA8U &&
12198+ (call & 0xC0000000U) == 0x40000000U &&
12199+ nop == 0x01000000U)
12200+ {
12201+ struct vm_area_struct *vma;
12202+ unsigned long call_dl_resolve;
12203+
12204+ down_read(&current->mm->mmap_sem);
12205+ call_dl_resolve = current->mm->call_dl_resolve;
12206+ up_read(&current->mm->mmap_sem);
12207+ if (likely(call_dl_resolve))
12208+ goto emulate;
12209+
12210+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12211+
12212+ down_write(&current->mm->mmap_sem);
12213+ if (current->mm->call_dl_resolve) {
12214+ call_dl_resolve = current->mm->call_dl_resolve;
12215+ up_write(&current->mm->mmap_sem);
12216+ if (vma)
12217+ kmem_cache_free(vm_area_cachep, vma);
12218+ goto emulate;
12219+ }
12220+
12221+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12222+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12223+ up_write(&current->mm->mmap_sem);
12224+ if (vma)
12225+ kmem_cache_free(vm_area_cachep, vma);
12226+ return 1;
12227+ }
12228+
12229+ if (pax_insert_vma(vma, call_dl_resolve)) {
12230+ up_write(&current->mm->mmap_sem);
12231+ kmem_cache_free(vm_area_cachep, vma);
12232+ return 1;
12233+ }
12234+
12235+ current->mm->call_dl_resolve = call_dl_resolve;
12236+ up_write(&current->mm->mmap_sem);
12237+
12238+emulate:
12239+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12240+ regs->pc = call_dl_resolve;
12241+ regs->npc = addr+4;
12242+ return 3;
12243+ }
12244+#endif
12245+
12246+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12247+ if ((save & 0xFFC00000U) == 0x05000000U &&
12248+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12249+ nop == 0x01000000U)
12250+ {
12251+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12252+ regs->u_regs[UREG_G2] = addr + 4;
12253+ addr = (save & 0x003FFFFFU) << 10;
12254+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12255+ regs->pc = addr;
12256+ regs->npc = addr+4;
12257+ return 3;
12258+ }
12259+ }
12260+ } while (0);
12261+
12262+ do { /* PaX: unpatched PLT emulation step 2 */
12263+ unsigned int save, call, nop;
12264+
12265+ err = get_user(save, (unsigned int *)(regs->pc-4));
12266+ err |= get_user(call, (unsigned int *)regs->pc);
12267+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
12268+ if (err)
12269+ break;
12270+
12271+ if (save == 0x9DE3BFA8U &&
12272+ (call & 0xC0000000U) == 0x40000000U &&
12273+ nop == 0x01000000U)
12274+ {
12275+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
12276+
12277+ regs->u_regs[UREG_RETPC] = regs->pc;
12278+ regs->pc = dl_resolve;
12279+ regs->npc = dl_resolve+4;
12280+ return 3;
12281+ }
12282+ } while (0);
12283+#endif
12284+
12285+ return 1;
12286+}
12287+
12288+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12289+{
12290+ unsigned long i;
12291+
12292+ printk(KERN_ERR "PAX: bytes at PC: ");
12293+ for (i = 0; i < 8; i++) {
12294+ unsigned int c;
12295+ if (get_user(c, (unsigned int *)pc+i))
12296+ printk(KERN_CONT "???????? ");
12297+ else
12298+ printk(KERN_CONT "%08x ", c);
12299+ }
12300+ printk("\n");
12301+}
12302+#endif
12303+
12304 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
12305 int text_fault)
12306 {
12307@@ -226,6 +500,24 @@ good_area:
12308 if (!(vma->vm_flags & VM_WRITE))
12309 goto bad_area;
12310 } else {
12311+
12312+#ifdef CONFIG_PAX_PAGEEXEC
12313+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
12314+ up_read(&mm->mmap_sem);
12315+ switch (pax_handle_fetch_fault(regs)) {
12316+
12317+#ifdef CONFIG_PAX_EMUPLT
12318+ case 2:
12319+ case 3:
12320+ return;
12321+#endif
12322+
12323+ }
12324+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
12325+ do_group_exit(SIGKILL);
12326+ }
12327+#endif
12328+
12329 /* Allow reads even for write-only mappings */
12330 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
12331 goto bad_area;
12332diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
12333index 18fcd71..e4fe821 100644
12334--- a/arch/sparc/mm/fault_64.c
12335+++ b/arch/sparc/mm/fault_64.c
12336@@ -22,6 +22,9 @@
12337 #include <linux/kdebug.h>
12338 #include <linux/percpu.h>
12339 #include <linux/context_tracking.h>
12340+#include <linux/slab.h>
12341+#include <linux/pagemap.h>
12342+#include <linux/compiler.h>
12343
12344 #include <asm/page.h>
12345 #include <asm/pgtable.h>
12346@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
12347 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
12348 regs->tpc);
12349 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
12350- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
12351+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
12352 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
12353 dump_stack();
12354 unhandled_fault(regs->tpc, current, regs);
12355@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
12356 show_regs(regs);
12357 }
12358
12359+#ifdef CONFIG_PAX_PAGEEXEC
12360+#ifdef CONFIG_PAX_DLRESOLVE
12361+static void pax_emuplt_close(struct vm_area_struct *vma)
12362+{
12363+ vma->vm_mm->call_dl_resolve = 0UL;
12364+}
12365+
12366+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12367+{
12368+ unsigned int *kaddr;
12369+
12370+ vmf->page = alloc_page(GFP_HIGHUSER);
12371+ if (!vmf->page)
12372+ return VM_FAULT_OOM;
12373+
12374+ kaddr = kmap(vmf->page);
12375+ memset(kaddr, 0, PAGE_SIZE);
12376+ kaddr[0] = 0x9DE3BFA8U; /* save */
12377+ flush_dcache_page(vmf->page);
12378+ kunmap(vmf->page);
12379+ return VM_FAULT_MAJOR;
12380+}
12381+
12382+static const struct vm_operations_struct pax_vm_ops = {
12383+ .close = pax_emuplt_close,
12384+ .fault = pax_emuplt_fault
12385+};
12386+
12387+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12388+{
12389+ int ret;
12390+
12391+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12392+ vma->vm_mm = current->mm;
12393+ vma->vm_start = addr;
12394+ vma->vm_end = addr + PAGE_SIZE;
12395+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12396+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12397+ vma->vm_ops = &pax_vm_ops;
12398+
12399+ ret = insert_vm_struct(current->mm, vma);
12400+ if (ret)
12401+ return ret;
12402+
12403+ ++current->mm->total_vm;
12404+ return 0;
12405+}
12406+#endif
12407+
12408+/*
12409+ * PaX: decide what to do with offenders (regs->tpc = fault address)
12410+ *
12411+ * returns 1 when task should be killed
12412+ * 2 when patched PLT trampoline was detected
12413+ * 3 when unpatched PLT trampoline was detected
12414+ */
12415+static int pax_handle_fetch_fault(struct pt_regs *regs)
12416+{
12417+
12418+#ifdef CONFIG_PAX_EMUPLT
12419+ int err;
12420+
12421+ do { /* PaX: patched PLT emulation #1 */
12422+ unsigned int sethi1, sethi2, jmpl;
12423+
12424+ err = get_user(sethi1, (unsigned int *)regs->tpc);
12425+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
12426+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
12427+
12428+ if (err)
12429+ break;
12430+
12431+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12432+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12433+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12434+ {
12435+ unsigned long addr;
12436+
12437+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12438+ addr = regs->u_regs[UREG_G1];
12439+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12440+
12441+ if (test_thread_flag(TIF_32BIT))
12442+ addr &= 0xFFFFFFFFUL;
12443+
12444+ regs->tpc = addr;
12445+ regs->tnpc = addr+4;
12446+ return 2;
12447+ }
12448+ } while (0);
12449+
12450+ do { /* PaX: patched PLT emulation #2 */
12451+ unsigned int ba;
12452+
12453+ err = get_user(ba, (unsigned int *)regs->tpc);
12454+
12455+ if (err)
12456+ break;
12457+
12458+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12459+ unsigned long addr;
12460+
12461+ if ((ba & 0xFFC00000U) == 0x30800000U)
12462+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12463+ else
12464+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12465+
12466+ if (test_thread_flag(TIF_32BIT))
12467+ addr &= 0xFFFFFFFFUL;
12468+
12469+ regs->tpc = addr;
12470+ regs->tnpc = addr+4;
12471+ return 2;
12472+ }
12473+ } while (0);
12474+
12475+ do { /* PaX: patched PLT emulation #3 */
12476+ unsigned int sethi, bajmpl, nop;
12477+
12478+ err = get_user(sethi, (unsigned int *)regs->tpc);
12479+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12480+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12481+
12482+ if (err)
12483+ break;
12484+
12485+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12486+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12487+ nop == 0x01000000U)
12488+ {
12489+ unsigned long addr;
12490+
12491+ addr = (sethi & 0x003FFFFFU) << 10;
12492+ regs->u_regs[UREG_G1] = addr;
12493+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12494+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12495+ else
12496+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12497+
12498+ if (test_thread_flag(TIF_32BIT))
12499+ addr &= 0xFFFFFFFFUL;
12500+
12501+ regs->tpc = addr;
12502+ regs->tnpc = addr+4;
12503+ return 2;
12504+ }
12505+ } while (0);
12506+
12507+ do { /* PaX: patched PLT emulation #4 */
12508+ unsigned int sethi, mov1, call, mov2;
12509+
12510+ err = get_user(sethi, (unsigned int *)regs->tpc);
12511+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12512+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12513+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12514+
12515+ if (err)
12516+ break;
12517+
12518+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12519+ mov1 == 0x8210000FU &&
12520+ (call & 0xC0000000U) == 0x40000000U &&
12521+ mov2 == 0x9E100001U)
12522+ {
12523+ unsigned long addr;
12524+
12525+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12526+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12527+
12528+ if (test_thread_flag(TIF_32BIT))
12529+ addr &= 0xFFFFFFFFUL;
12530+
12531+ regs->tpc = addr;
12532+ regs->tnpc = addr+4;
12533+ return 2;
12534+ }
12535+ } while (0);
12536+
12537+ do { /* PaX: patched PLT emulation #5 */
12538+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12539+
12540+ err = get_user(sethi, (unsigned int *)regs->tpc);
12541+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12542+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12543+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12544+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12545+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12546+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12547+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12548+
12549+ if (err)
12550+ break;
12551+
12552+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12553+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12554+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12555+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12556+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12557+ sllx == 0x83287020U &&
12558+ jmpl == 0x81C04005U &&
12559+ nop == 0x01000000U)
12560+ {
12561+ unsigned long addr;
12562+
12563+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12564+ regs->u_regs[UREG_G1] <<= 32;
12565+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12566+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12567+ regs->tpc = addr;
12568+ regs->tnpc = addr+4;
12569+ return 2;
12570+ }
12571+ } while (0);
12572+
12573+ do { /* PaX: patched PLT emulation #6 */
12574+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12575+
12576+ err = get_user(sethi, (unsigned int *)regs->tpc);
12577+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12578+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12579+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12580+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12581+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12582+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12583+
12584+ if (err)
12585+ break;
12586+
12587+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12588+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12589+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12590+ sllx == 0x83287020U &&
12591+ (or & 0xFFFFE000U) == 0x8A116000U &&
12592+ jmpl == 0x81C04005U &&
12593+ nop == 0x01000000U)
12594+ {
12595+ unsigned long addr;
12596+
12597+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12598+ regs->u_regs[UREG_G1] <<= 32;
12599+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12600+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12601+ regs->tpc = addr;
12602+ regs->tnpc = addr+4;
12603+ return 2;
12604+ }
12605+ } while (0);
12606+
12607+ do { /* PaX: unpatched PLT emulation step 1 */
12608+ unsigned int sethi, ba, nop;
12609+
12610+ err = get_user(sethi, (unsigned int *)regs->tpc);
12611+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12612+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12613+
12614+ if (err)
12615+ break;
12616+
12617+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12618+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12619+ nop == 0x01000000U)
12620+ {
12621+ unsigned long addr;
12622+ unsigned int save, call;
12623+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12624+
12625+ if ((ba & 0xFFC00000U) == 0x30800000U)
12626+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12627+ else
12628+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12629+
12630+ if (test_thread_flag(TIF_32BIT))
12631+ addr &= 0xFFFFFFFFUL;
12632+
12633+ err = get_user(save, (unsigned int *)addr);
12634+ err |= get_user(call, (unsigned int *)(addr+4));
12635+ err |= get_user(nop, (unsigned int *)(addr+8));
12636+ if (err)
12637+ break;
12638+
12639+#ifdef CONFIG_PAX_DLRESOLVE
12640+ if (save == 0x9DE3BFA8U &&
12641+ (call & 0xC0000000U) == 0x40000000U &&
12642+ nop == 0x01000000U)
12643+ {
12644+ struct vm_area_struct *vma;
12645+ unsigned long call_dl_resolve;
12646+
12647+ down_read(&current->mm->mmap_sem);
12648+ call_dl_resolve = current->mm->call_dl_resolve;
12649+ up_read(&current->mm->mmap_sem);
12650+ if (likely(call_dl_resolve))
12651+ goto emulate;
12652+
12653+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12654+
12655+ down_write(&current->mm->mmap_sem);
12656+ if (current->mm->call_dl_resolve) {
12657+ call_dl_resolve = current->mm->call_dl_resolve;
12658+ up_write(&current->mm->mmap_sem);
12659+ if (vma)
12660+ kmem_cache_free(vm_area_cachep, vma);
12661+ goto emulate;
12662+ }
12663+
12664+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12665+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12666+ up_write(&current->mm->mmap_sem);
12667+ if (vma)
12668+ kmem_cache_free(vm_area_cachep, vma);
12669+ return 1;
12670+ }
12671+
12672+ if (pax_insert_vma(vma, call_dl_resolve)) {
12673+ up_write(&current->mm->mmap_sem);
12674+ kmem_cache_free(vm_area_cachep, vma);
12675+ return 1;
12676+ }
12677+
12678+ current->mm->call_dl_resolve = call_dl_resolve;
12679+ up_write(&current->mm->mmap_sem);
12680+
12681+emulate:
12682+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12683+ regs->tpc = call_dl_resolve;
12684+ regs->tnpc = addr+4;
12685+ return 3;
12686+ }
12687+#endif
12688+
12689+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12690+ if ((save & 0xFFC00000U) == 0x05000000U &&
12691+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12692+ nop == 0x01000000U)
12693+ {
12694+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12695+ regs->u_regs[UREG_G2] = addr + 4;
12696+ addr = (save & 0x003FFFFFU) << 10;
12697+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12698+
12699+ if (test_thread_flag(TIF_32BIT))
12700+ addr &= 0xFFFFFFFFUL;
12701+
12702+ regs->tpc = addr;
12703+ regs->tnpc = addr+4;
12704+ return 3;
12705+ }
12706+
12707+ /* PaX: 64-bit PLT stub */
12708+ err = get_user(sethi1, (unsigned int *)addr);
12709+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12710+ err |= get_user(or1, (unsigned int *)(addr+8));
12711+ err |= get_user(or2, (unsigned int *)(addr+12));
12712+ err |= get_user(sllx, (unsigned int *)(addr+16));
12713+ err |= get_user(add, (unsigned int *)(addr+20));
12714+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12715+ err |= get_user(nop, (unsigned int *)(addr+28));
12716+ if (err)
12717+ break;
12718+
12719+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12720+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12721+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12722+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12723+ sllx == 0x89293020U &&
12724+ add == 0x8A010005U &&
12725+ jmpl == 0x89C14000U &&
12726+ nop == 0x01000000U)
12727+ {
12728+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12729+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12730+ regs->u_regs[UREG_G4] <<= 32;
12731+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12732+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12733+ regs->u_regs[UREG_G4] = addr + 24;
12734+ addr = regs->u_regs[UREG_G5];
12735+ regs->tpc = addr;
12736+ regs->tnpc = addr+4;
12737+ return 3;
12738+ }
12739+ }
12740+ } while (0);
12741+
12742+#ifdef CONFIG_PAX_DLRESOLVE
12743+ do { /* PaX: unpatched PLT emulation step 2 */
12744+ unsigned int save, call, nop;
12745+
12746+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12747+ err |= get_user(call, (unsigned int *)regs->tpc);
12748+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12749+ if (err)
12750+ break;
12751+
12752+ if (save == 0x9DE3BFA8U &&
12753+ (call & 0xC0000000U) == 0x40000000U &&
12754+ nop == 0x01000000U)
12755+ {
12756+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12757+
12758+ if (test_thread_flag(TIF_32BIT))
12759+ dl_resolve &= 0xFFFFFFFFUL;
12760+
12761+ regs->u_regs[UREG_RETPC] = regs->tpc;
12762+ regs->tpc = dl_resolve;
12763+ regs->tnpc = dl_resolve+4;
12764+ return 3;
12765+ }
12766+ } while (0);
12767+#endif
12768+
12769+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12770+ unsigned int sethi, ba, nop;
12771+
12772+ err = get_user(sethi, (unsigned int *)regs->tpc);
12773+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12774+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12775+
12776+ if (err)
12777+ break;
12778+
12779+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12780+ (ba & 0xFFF00000U) == 0x30600000U &&
12781+ nop == 0x01000000U)
12782+ {
12783+ unsigned long addr;
12784+
12785+ addr = (sethi & 0x003FFFFFU) << 10;
12786+ regs->u_regs[UREG_G1] = addr;
12787+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12788+
12789+ if (test_thread_flag(TIF_32BIT))
12790+ addr &= 0xFFFFFFFFUL;
12791+
12792+ regs->tpc = addr;
12793+ regs->tnpc = addr+4;
12794+ return 2;
12795+ }
12796+ } while (0);
12797+
12798+#endif
12799+
12800+ return 1;
12801+}
12802+
12803+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12804+{
12805+ unsigned long i;
12806+
12807+ printk(KERN_ERR "PAX: bytes at PC: ");
12808+ for (i = 0; i < 8; i++) {
12809+ unsigned int c;
12810+ if (get_user(c, (unsigned int *)pc+i))
12811+ printk(KERN_CONT "???????? ");
12812+ else
12813+ printk(KERN_CONT "%08x ", c);
12814+ }
12815+ printk("\n");
12816+}
12817+#endif
12818+
12819 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12820 {
12821 enum ctx_state prev_state = exception_enter();
12822@@ -353,6 +816,29 @@ retry:
12823 if (!vma)
12824 goto bad_area;
12825
12826+#ifdef CONFIG_PAX_PAGEEXEC
12827+ /* PaX: detect ITLB misses on non-exec pages */
12828+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12829+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12830+ {
12831+ if (address != regs->tpc)
12832+ goto good_area;
12833+
12834+ up_read(&mm->mmap_sem);
12835+ switch (pax_handle_fetch_fault(regs)) {
12836+
12837+#ifdef CONFIG_PAX_EMUPLT
12838+ case 2:
12839+ case 3:
12840+ return;
12841+#endif
12842+
12843+ }
12844+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12845+ do_group_exit(SIGKILL);
12846+ }
12847+#endif
12848+
12849 /* Pure DTLB misses do not tell us whether the fault causing
12850 * load/store/atomic was a write or not, it only says that there
12851 * was no match. So in such a case we (carefully) read the
12852diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12853index d329537..2c3746a 100644
12854--- a/arch/sparc/mm/hugetlbpage.c
12855+++ b/arch/sparc/mm/hugetlbpage.c
12856@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12857 unsigned long addr,
12858 unsigned long len,
12859 unsigned long pgoff,
12860- unsigned long flags)
12861+ unsigned long flags,
12862+ unsigned long offset)
12863 {
12864+ struct mm_struct *mm = current->mm;
12865 unsigned long task_size = TASK_SIZE;
12866 struct vm_unmapped_area_info info;
12867
12868@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12869
12870 info.flags = 0;
12871 info.length = len;
12872- info.low_limit = TASK_UNMAPPED_BASE;
12873+ info.low_limit = mm->mmap_base;
12874 info.high_limit = min(task_size, VA_EXCLUDE_START);
12875 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12876 info.align_offset = 0;
12877+ info.threadstack_offset = offset;
12878 addr = vm_unmapped_area(&info);
12879
12880 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12881 VM_BUG_ON(addr != -ENOMEM);
12882 info.low_limit = VA_EXCLUDE_END;
12883+
12884+#ifdef CONFIG_PAX_RANDMMAP
12885+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12886+ info.low_limit += mm->delta_mmap;
12887+#endif
12888+
12889 info.high_limit = task_size;
12890 addr = vm_unmapped_area(&info);
12891 }
12892@@ -55,7 +64,8 @@ static unsigned long
12893 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12894 const unsigned long len,
12895 const unsigned long pgoff,
12896- const unsigned long flags)
12897+ const unsigned long flags,
12898+ const unsigned long offset)
12899 {
12900 struct mm_struct *mm = current->mm;
12901 unsigned long addr = addr0;
12902@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12903 info.high_limit = mm->mmap_base;
12904 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12905 info.align_offset = 0;
12906+ info.threadstack_offset = offset;
12907 addr = vm_unmapped_area(&info);
12908
12909 /*
12910@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12911 VM_BUG_ON(addr != -ENOMEM);
12912 info.flags = 0;
12913 info.low_limit = TASK_UNMAPPED_BASE;
12914+
12915+#ifdef CONFIG_PAX_RANDMMAP
12916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12917+ info.low_limit += mm->delta_mmap;
12918+#endif
12919+
12920 info.high_limit = STACK_TOP32;
12921 addr = vm_unmapped_area(&info);
12922 }
12923@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12924 struct mm_struct *mm = current->mm;
12925 struct vm_area_struct *vma;
12926 unsigned long task_size = TASK_SIZE;
12927+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12928
12929 if (test_thread_flag(TIF_32BIT))
12930 task_size = STACK_TOP32;
12931@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12932 return addr;
12933 }
12934
12935+#ifdef CONFIG_PAX_RANDMMAP
12936+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12937+#endif
12938+
12939 if (addr) {
12940 addr = ALIGN(addr, HPAGE_SIZE);
12941 vma = find_vma(mm, addr);
12942- if (task_size - len >= addr &&
12943- (!vma || addr + len <= vma->vm_start))
12944+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12945 return addr;
12946 }
12947 if (mm->get_unmapped_area == arch_get_unmapped_area)
12948 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12949- pgoff, flags);
12950+ pgoff, flags, offset);
12951 else
12952 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12953- pgoff, flags);
12954+ pgoff, flags, offset);
12955 }
12956
12957 pte_t *huge_pte_alloc(struct mm_struct *mm,
12958diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12959index 04bc826..0fefab9 100644
12960--- a/arch/sparc/mm/init_64.c
12961+++ b/arch/sparc/mm/init_64.c
12962@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12963 int num_kernel_image_mappings;
12964
12965 #ifdef CONFIG_DEBUG_DCFLUSH
12966-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12967+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12968 #ifdef CONFIG_SMP
12969-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12970+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12971 #endif
12972 #endif
12973
12974@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12975 {
12976 BUG_ON(tlb_type == hypervisor);
12977 #ifdef CONFIG_DEBUG_DCFLUSH
12978- atomic_inc(&dcpage_flushes);
12979+ atomic_inc_unchecked(&dcpage_flushes);
12980 #endif
12981
12982 #ifdef DCACHE_ALIASING_POSSIBLE
12983@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12984
12985 #ifdef CONFIG_DEBUG_DCFLUSH
12986 seq_printf(m, "DCPageFlushes\t: %d\n",
12987- atomic_read(&dcpage_flushes));
12988+ atomic_read_unchecked(&dcpage_flushes));
12989 #ifdef CONFIG_SMP
12990 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12991- atomic_read(&dcpage_flushes_xcall));
12992+ atomic_read_unchecked(&dcpage_flushes_xcall));
12993 #endif /* CONFIG_SMP */
12994 #endif /* CONFIG_DEBUG_DCFLUSH */
12995 }
12996diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
12997index ece4af0..f04b862 100644
12998--- a/arch/sparc/net/bpf_jit_comp.c
12999+++ b/arch/sparc/net/bpf_jit_comp.c
13000@@ -823,5 +823,6 @@ void bpf_jit_free(struct bpf_prog *fp)
13001 {
13002 if (fp->jited)
13003 module_free(NULL, fp->bpf_func);
13004- kfree(fp);
13005+
13006+ bpf_prog_unlock_free(fp);
13007 }
13008diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
13009index 7fcd492..1311074 100644
13010--- a/arch/tile/Kconfig
13011+++ b/arch/tile/Kconfig
13012@@ -191,6 +191,7 @@ source "kernel/Kconfig.hz"
13013
13014 config KEXEC
13015 bool "kexec system call"
13016+ depends on !GRKERNSEC_KMEM
13017 ---help---
13018 kexec is a system call that implements the ability to shutdown your
13019 current kernel, and to start another kernel. It is like a reboot
13020diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
13021index 7b11c5f..755a026 100644
13022--- a/arch/tile/include/asm/atomic_64.h
13023+++ b/arch/tile/include/asm/atomic_64.h
13024@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
13025
13026 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
13027
13028+#define atomic64_read_unchecked(v) atomic64_read(v)
13029+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
13030+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
13031+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
13032+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
13033+#define atomic64_inc_unchecked(v) atomic64_inc(v)
13034+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
13035+#define atomic64_dec_unchecked(v) atomic64_dec(v)
13036+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
13037+
13038 /* Define this to indicate that cmpxchg is an efficient operation. */
13039 #define __HAVE_ARCH_CMPXCHG
13040
13041diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
13042index 6160761..00cac88 100644
13043--- a/arch/tile/include/asm/cache.h
13044+++ b/arch/tile/include/asm/cache.h
13045@@ -15,11 +15,12 @@
13046 #ifndef _ASM_TILE_CACHE_H
13047 #define _ASM_TILE_CACHE_H
13048
13049+#include <linux/const.h>
13050 #include <arch/chip.h>
13051
13052 /* bytes per L1 data cache line */
13053 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
13054-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13055+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13056
13057 /* bytes per L2 cache line */
13058 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
13059diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
13060index b6cde32..c0cb736 100644
13061--- a/arch/tile/include/asm/uaccess.h
13062+++ b/arch/tile/include/asm/uaccess.h
13063@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
13064 const void __user *from,
13065 unsigned long n)
13066 {
13067- int sz = __compiletime_object_size(to);
13068+ size_t sz = __compiletime_object_size(to);
13069
13070- if (likely(sz == -1 || sz >= n))
13071+ if (likely(sz == (size_t)-1 || sz >= n))
13072 n = _copy_from_user(to, from, n);
13073 else
13074 copy_from_user_overflow();
13075diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
13076index e514899..f8743c4 100644
13077--- a/arch/tile/mm/hugetlbpage.c
13078+++ b/arch/tile/mm/hugetlbpage.c
13079@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13080 info.high_limit = TASK_SIZE;
13081 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13082 info.align_offset = 0;
13083+ info.threadstack_offset = 0;
13084 return vm_unmapped_area(&info);
13085 }
13086
13087@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13088 info.high_limit = current->mm->mmap_base;
13089 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13090 info.align_offset = 0;
13091+ info.threadstack_offset = 0;
13092 addr = vm_unmapped_area(&info);
13093
13094 /*
13095diff --git a/arch/um/Makefile b/arch/um/Makefile
13096index e4b1a96..16162f8 100644
13097--- a/arch/um/Makefile
13098+++ b/arch/um/Makefile
13099@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
13100 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
13101 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
13102
13103+ifdef CONSTIFY_PLUGIN
13104+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13105+endif
13106+
13107 #This will adjust *FLAGS accordingly to the platform.
13108 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
13109
13110diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
13111index 19e1bdd..3665b77 100644
13112--- a/arch/um/include/asm/cache.h
13113+++ b/arch/um/include/asm/cache.h
13114@@ -1,6 +1,7 @@
13115 #ifndef __UM_CACHE_H
13116 #define __UM_CACHE_H
13117
13118+#include <linux/const.h>
13119
13120 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
13121 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13122@@ -12,6 +13,6 @@
13123 # define L1_CACHE_SHIFT 5
13124 #endif
13125
13126-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13127+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13128
13129 #endif
13130diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
13131index 2e0a6b1..a64d0f5 100644
13132--- a/arch/um/include/asm/kmap_types.h
13133+++ b/arch/um/include/asm/kmap_types.h
13134@@ -8,6 +8,6 @@
13135
13136 /* No more #include "asm/arch/kmap_types.h" ! */
13137
13138-#define KM_TYPE_NR 14
13139+#define KM_TYPE_NR 15
13140
13141 #endif
13142diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
13143index 71c5d13..4c7b9f1 100644
13144--- a/arch/um/include/asm/page.h
13145+++ b/arch/um/include/asm/page.h
13146@@ -14,6 +14,9 @@
13147 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
13148 #define PAGE_MASK (~(PAGE_SIZE-1))
13149
13150+#define ktla_ktva(addr) (addr)
13151+#define ktva_ktla(addr) (addr)
13152+
13153 #ifndef __ASSEMBLY__
13154
13155 struct page;
13156diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
13157index 0032f92..cd151e0 100644
13158--- a/arch/um/include/asm/pgtable-3level.h
13159+++ b/arch/um/include/asm/pgtable-3level.h
13160@@ -58,6 +58,7 @@
13161 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
13162 #define pud_populate(mm, pud, pmd) \
13163 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
13164+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
13165
13166 #ifdef CONFIG_64BIT
13167 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
13168diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
13169index f17bca8..48adb87 100644
13170--- a/arch/um/kernel/process.c
13171+++ b/arch/um/kernel/process.c
13172@@ -356,22 +356,6 @@ int singlestepping(void * t)
13173 return 2;
13174 }
13175
13176-/*
13177- * Only x86 and x86_64 have an arch_align_stack().
13178- * All other arches have "#define arch_align_stack(x) (x)"
13179- * in their asm/exec.h
13180- * As this is included in UML from asm-um/system-generic.h,
13181- * we can use it to behave as the subarch does.
13182- */
13183-#ifndef arch_align_stack
13184-unsigned long arch_align_stack(unsigned long sp)
13185-{
13186- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
13187- sp -= get_random_int() % 8192;
13188- return sp & ~0xf;
13189-}
13190-#endif
13191-
13192 unsigned long get_wchan(struct task_struct *p)
13193 {
13194 unsigned long stack_page, sp, ip;
13195diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
13196index ad8f795..2c7eec6 100644
13197--- a/arch/unicore32/include/asm/cache.h
13198+++ b/arch/unicore32/include/asm/cache.h
13199@@ -12,8 +12,10 @@
13200 #ifndef __UNICORE_CACHE_H__
13201 #define __UNICORE_CACHE_H__
13202
13203-#define L1_CACHE_SHIFT (5)
13204-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13205+#include <linux/const.h>
13206+
13207+#define L1_CACHE_SHIFT 5
13208+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13209
13210 /*
13211 * Memory returned by kmalloc() may be used for DMA, so we must make
13212diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
13213index 3632743..630a8bb 100644
13214--- a/arch/x86/Kconfig
13215+++ b/arch/x86/Kconfig
13216@@ -130,7 +130,7 @@ config X86
13217 select RTC_LIB
13218 select HAVE_DEBUG_STACKOVERFLOW
13219 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
13220- select HAVE_CC_STACKPROTECTOR
13221+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
13222 select GENERIC_CPU_AUTOPROBE
13223 select HAVE_ARCH_AUDITSYSCALL
13224 select ARCH_SUPPORTS_ATOMIC_RMW
13225@@ -258,7 +258,7 @@ config X86_HT
13226
13227 config X86_32_LAZY_GS
13228 def_bool y
13229- depends on X86_32 && !CC_STACKPROTECTOR
13230+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
13231
13232 config ARCH_HWEIGHT_CFLAGS
13233 string
13234@@ -555,6 +555,7 @@ config SCHED_OMIT_FRAME_POINTER
13235
13236 menuconfig HYPERVISOR_GUEST
13237 bool "Linux guest support"
13238+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
13239 ---help---
13240 Say Y here to enable options for running Linux under various hyper-
13241 visors. This option enables basic hypervisor detection and platform
13242@@ -1083,6 +1084,7 @@ choice
13243
13244 config NOHIGHMEM
13245 bool "off"
13246+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13247 ---help---
13248 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
13249 However, the address space of 32-bit x86 processors is only 4
13250@@ -1119,6 +1121,7 @@ config NOHIGHMEM
13251
13252 config HIGHMEM4G
13253 bool "4GB"
13254+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13255 ---help---
13256 Select this if you have a 32-bit processor and between 1 and 4
13257 gigabytes of physical RAM.
13258@@ -1171,7 +1174,7 @@ config PAGE_OFFSET
13259 hex
13260 default 0xB0000000 if VMSPLIT_3G_OPT
13261 default 0x80000000 if VMSPLIT_2G
13262- default 0x78000000 if VMSPLIT_2G_OPT
13263+ default 0x70000000 if VMSPLIT_2G_OPT
13264 default 0x40000000 if VMSPLIT_1G
13265 default 0xC0000000
13266 depends on X86_32
13267@@ -1586,6 +1589,7 @@ source kernel/Kconfig.hz
13268
13269 config KEXEC
13270 bool "kexec system call"
13271+ depends on !GRKERNSEC_KMEM
13272 ---help---
13273 kexec is a system call that implements the ability to shutdown your
13274 current kernel, and to start another kernel. It is like a reboot
13275@@ -1771,7 +1775,9 @@ config X86_NEED_RELOCS
13276
13277 config PHYSICAL_ALIGN
13278 hex "Alignment value to which kernel should be aligned"
13279- default "0x200000"
13280+ default "0x1000000"
13281+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
13282+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
13283 range 0x2000 0x1000000 if X86_32
13284 range 0x200000 0x1000000 if X86_64
13285 ---help---
13286@@ -1854,6 +1860,7 @@ config COMPAT_VDSO
13287 def_bool n
13288 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
13289 depends on X86_32 || IA32_EMULATION
13290+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
13291 ---help---
13292 Certain buggy versions of glibc will crash if they are
13293 presented with a 32-bit vDSO that is not mapped at the address
13294diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
13295index 6983314..54ad7e8 100644
13296--- a/arch/x86/Kconfig.cpu
13297+++ b/arch/x86/Kconfig.cpu
13298@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
13299
13300 config X86_F00F_BUG
13301 def_bool y
13302- depends on M586MMX || M586TSC || M586 || M486
13303+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
13304
13305 config X86_INVD_BUG
13306 def_bool y
13307@@ -327,7 +327,7 @@ config X86_INVD_BUG
13308
13309 config X86_ALIGNMENT_16
13310 def_bool y
13311- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13312+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13313
13314 config X86_INTEL_USERCOPY
13315 def_bool y
13316@@ -369,7 +369,7 @@ config X86_CMPXCHG64
13317 # generates cmov.
13318 config X86_CMOV
13319 def_bool y
13320- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13321+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13322
13323 config X86_MINIMUM_CPU_FAMILY
13324 int
13325diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
13326index 61bd2ad..50b625d 100644
13327--- a/arch/x86/Kconfig.debug
13328+++ b/arch/x86/Kconfig.debug
13329@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
13330 config DEBUG_RODATA
13331 bool "Write protect kernel read-only data structures"
13332 default y
13333- depends on DEBUG_KERNEL
13334+ depends on DEBUG_KERNEL && BROKEN
13335 ---help---
13336 Mark the kernel read-only data as write-protected in the pagetables,
13337 in order to catch accidental (and incorrect) writes to such const
13338@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
13339
13340 config DEBUG_SET_MODULE_RONX
13341 bool "Set loadable kernel module data as NX and text as RO"
13342- depends on MODULES
13343+ depends on MODULES && BROKEN
13344 ---help---
13345 This option helps catch unintended modifications to loadable
13346 kernel module's text and read-only data. It also prevents execution
13347diff --git a/arch/x86/Makefile b/arch/x86/Makefile
13348index 60087ca..9d9500e 100644
13349--- a/arch/x86/Makefile
13350+++ b/arch/x86/Makefile
13351@@ -68,9 +68,6 @@ ifeq ($(CONFIG_X86_32),y)
13352 # CPU-specific tuning. Anything which can be shared with UML should go here.
13353 include $(srctree)/arch/x86/Makefile_32.cpu
13354 KBUILD_CFLAGS += $(cflags-y)
13355-
13356- # temporary until string.h is fixed
13357- KBUILD_CFLAGS += -ffreestanding
13358 else
13359 BITS := 64
13360 UTS_MACHINE := x86_64
13361@@ -111,6 +108,9 @@ else
13362 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
13363 endif
13364
13365+# temporary until string.h is fixed
13366+KBUILD_CFLAGS += -ffreestanding
13367+
13368 # Make sure compiler does not have buggy stack-protector support.
13369 ifdef CONFIG_CC_STACKPROTECTOR
13370 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
13371@@ -184,6 +184,7 @@ archheaders:
13372 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
13373
13374 archprepare:
13375+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
13376 ifeq ($(CONFIG_KEXEC_FILE),y)
13377 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
13378 endif
13379@@ -274,3 +275,9 @@ define archhelp
13380 echo ' FDINITRD=file initrd for the booted kernel'
13381 echo ' kvmconfig - Enable additional options for guest kernel support'
13382 endef
13383+
13384+define OLD_LD
13385+
13386+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
13387+*** Please upgrade your binutils to 2.18 or newer
13388+endef
13389diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
13390index dbe8dd2..2f0a98f 100644
13391--- a/arch/x86/boot/Makefile
13392+++ b/arch/x86/boot/Makefile
13393@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
13394 # ---------------------------------------------------------------------------
13395
13396 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
13397+ifdef CONSTIFY_PLUGIN
13398+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13399+endif
13400 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13401 GCOV_PROFILE := n
13402
13403diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
13404index 878e4b9..20537ab 100644
13405--- a/arch/x86/boot/bitops.h
13406+++ b/arch/x86/boot/bitops.h
13407@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13408 u8 v;
13409 const u32 *p = (const u32 *)addr;
13410
13411- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13412+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13413 return v;
13414 }
13415
13416@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13417
13418 static inline void set_bit(int nr, void *addr)
13419 {
13420- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13421+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13422 }
13423
13424 #endif /* BOOT_BITOPS_H */
13425diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
13426index bd49ec6..94c7f58 100644
13427--- a/arch/x86/boot/boot.h
13428+++ b/arch/x86/boot/boot.h
13429@@ -84,7 +84,7 @@ static inline void io_delay(void)
13430 static inline u16 ds(void)
13431 {
13432 u16 seg;
13433- asm("movw %%ds,%0" : "=rm" (seg));
13434+ asm volatile("movw %%ds,%0" : "=rm" (seg));
13435 return seg;
13436 }
13437
13438diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
13439index 0fcd913..3bb5c42 100644
13440--- a/arch/x86/boot/compressed/Makefile
13441+++ b/arch/x86/boot/compressed/Makefile
13442@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
13443 KBUILD_CFLAGS += -mno-mmx -mno-sse
13444 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
13445 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
13446+ifdef CONSTIFY_PLUGIN
13447+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13448+endif
13449
13450 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13451 GCOV_PROFILE := n
13452diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13453index a53440e..c3dbf1e 100644
13454--- a/arch/x86/boot/compressed/efi_stub_32.S
13455+++ b/arch/x86/boot/compressed/efi_stub_32.S
13456@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13457 * parameter 2, ..., param n. To make things easy, we save the return
13458 * address of efi_call_phys in a global variable.
13459 */
13460- popl %ecx
13461- movl %ecx, saved_return_addr(%edx)
13462- /* get the function pointer into ECX*/
13463- popl %ecx
13464- movl %ecx, efi_rt_function_ptr(%edx)
13465+ popl saved_return_addr(%edx)
13466+ popl efi_rt_function_ptr(%edx)
13467
13468 /*
13469 * 3. Call the physical function.
13470 */
13471- call *%ecx
13472+ call *efi_rt_function_ptr(%edx)
13473
13474 /*
13475 * 4. Balance the stack. And because EAX contain the return value,
13476@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13477 1: popl %edx
13478 subl $1b, %edx
13479
13480- movl efi_rt_function_ptr(%edx), %ecx
13481- pushl %ecx
13482+ pushl efi_rt_function_ptr(%edx)
13483
13484 /*
13485 * 10. Push the saved return address onto the stack and return.
13486 */
13487- movl saved_return_addr(%edx), %ecx
13488- pushl %ecx
13489- ret
13490+ jmpl *saved_return_addr(%edx)
13491 ENDPROC(efi_call_phys)
13492 .previous
13493
13494diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13495index cbed140..5f2ca57 100644
13496--- a/arch/x86/boot/compressed/head_32.S
13497+++ b/arch/x86/boot/compressed/head_32.S
13498@@ -140,10 +140,10 @@ preferred_addr:
13499 addl %eax, %ebx
13500 notl %eax
13501 andl %eax, %ebx
13502- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13503+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13504 jge 1f
13505 #endif
13506- movl $LOAD_PHYSICAL_ADDR, %ebx
13507+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13508 1:
13509
13510 /* Target address to relocate to for decompression */
13511diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13512index 2884e0c..904a2f7 100644
13513--- a/arch/x86/boot/compressed/head_64.S
13514+++ b/arch/x86/boot/compressed/head_64.S
13515@@ -94,10 +94,10 @@ ENTRY(startup_32)
13516 addl %eax, %ebx
13517 notl %eax
13518 andl %eax, %ebx
13519- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13520+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13521 jge 1f
13522 #endif
13523- movl $LOAD_PHYSICAL_ADDR, %ebx
13524+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13525 1:
13526
13527 /* Target address to relocate to for decompression */
13528@@ -322,10 +322,10 @@ preferred_addr:
13529 addq %rax, %rbp
13530 notq %rax
13531 andq %rax, %rbp
13532- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13533+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13534 jge 1f
13535 #endif
13536- movq $LOAD_PHYSICAL_ADDR, %rbp
13537+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13538 1:
13539
13540 /* Target address to relocate to for decompression */
13541@@ -431,8 +431,8 @@ gdt:
13542 .long gdt
13543 .word 0
13544 .quad 0x0000000000000000 /* NULL descriptor */
13545- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13546- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13547+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13548+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13549 .quad 0x0080890000000000 /* TS descriptor */
13550 .quad 0x0000000000000000 /* TS continued */
13551 gdt_end:
13552diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13553index 57ab74d..7c52182 100644
13554--- a/arch/x86/boot/compressed/misc.c
13555+++ b/arch/x86/boot/compressed/misc.c
13556@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13557 * Calculate the delta between where vmlinux was linked to load
13558 * and where it was actually loaded.
13559 */
13560- delta = min_addr - LOAD_PHYSICAL_ADDR;
13561+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13562 if (!delta) {
13563 debug_putstr("No relocation needed... ");
13564 return;
13565@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13566 Elf32_Ehdr ehdr;
13567 Elf32_Phdr *phdrs, *phdr;
13568 #endif
13569- void *dest;
13570+ void *dest, *prev;
13571 int i;
13572
13573 memcpy(&ehdr, output, sizeof(ehdr));
13574@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13575 case PT_LOAD:
13576 #ifdef CONFIG_RELOCATABLE
13577 dest = output;
13578- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13579+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13580 #else
13581 dest = (void *)(phdr->p_paddr);
13582 #endif
13583 memcpy(dest,
13584 output + phdr->p_offset,
13585 phdr->p_filesz);
13586+ if (i)
13587+ memset(prev, 0xff, dest - prev);
13588+ prev = dest + phdr->p_filesz;
13589 break;
13590 default: /* Ignore other PT_* */ break;
13591 }
13592@@ -395,7 +398,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13593 error("Destination address too large");
13594 #endif
13595 #ifndef CONFIG_RELOCATABLE
13596- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13597+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13598 error("Wrong destination address");
13599 #endif
13600
13601diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13602index 1fd7d57..0f7d096 100644
13603--- a/arch/x86/boot/cpucheck.c
13604+++ b/arch/x86/boot/cpucheck.c
13605@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13606 u32 ecx = MSR_K7_HWCR;
13607 u32 eax, edx;
13608
13609- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13610+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13611 eax &= ~(1 << 15);
13612- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13613+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13614
13615 get_cpuflags(); /* Make sure it really did something */
13616 err = check_cpuflags();
13617@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13618 u32 ecx = MSR_VIA_FCR;
13619 u32 eax, edx;
13620
13621- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13622+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13623 eax |= (1<<1)|(1<<7);
13624- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13625+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13626
13627 set_bit(X86_FEATURE_CX8, cpu.flags);
13628 err = check_cpuflags();
13629@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13630 u32 eax, edx;
13631 u32 level = 1;
13632
13633- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13634- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13635- asm("cpuid"
13636+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13637+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13638+ asm volatile("cpuid"
13639 : "+a" (level), "=d" (cpu.flags[0])
13640 : : "ecx", "ebx");
13641- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13642+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13643
13644 err = check_cpuflags();
13645 } else if (err == 0x01 &&
13646diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13647index 16ef025..91e033b 100644
13648--- a/arch/x86/boot/header.S
13649+++ b/arch/x86/boot/header.S
13650@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13651 # single linked list of
13652 # struct setup_data
13653
13654-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13655+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13656
13657 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13659+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13660+#else
13661 #define VO_INIT_SIZE (VO__end - VO__text)
13662+#endif
13663 #if ZO_INIT_SIZE > VO_INIT_SIZE
13664 #define INIT_SIZE ZO_INIT_SIZE
13665 #else
13666diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13667index db75d07..8e6d0af 100644
13668--- a/arch/x86/boot/memory.c
13669+++ b/arch/x86/boot/memory.c
13670@@ -19,7 +19,7 @@
13671
13672 static int detect_memory_e820(void)
13673 {
13674- int count = 0;
13675+ unsigned int count = 0;
13676 struct biosregs ireg, oreg;
13677 struct e820entry *desc = boot_params.e820_map;
13678 static struct e820entry buf; /* static so it is zeroed */
13679diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13680index ba3e100..6501b8f 100644
13681--- a/arch/x86/boot/video-vesa.c
13682+++ b/arch/x86/boot/video-vesa.c
13683@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13684
13685 boot_params.screen_info.vesapm_seg = oreg.es;
13686 boot_params.screen_info.vesapm_off = oreg.di;
13687+ boot_params.screen_info.vesapm_size = oreg.cx;
13688 }
13689
13690 /*
13691diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13692index 43eda28..5ab5fdb 100644
13693--- a/arch/x86/boot/video.c
13694+++ b/arch/x86/boot/video.c
13695@@ -96,7 +96,7 @@ static void store_mode_params(void)
13696 static unsigned int get_entry(void)
13697 {
13698 char entry_buf[4];
13699- int i, len = 0;
13700+ unsigned int i, len = 0;
13701 int key;
13702 unsigned int v;
13703
13704diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13705index 9105655..41779c1 100644
13706--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13707+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13708@@ -8,6 +8,8 @@
13709 * including this sentence is retained in full.
13710 */
13711
13712+#include <asm/alternative-asm.h>
13713+
13714 .extern crypto_ft_tab
13715 .extern crypto_it_tab
13716 .extern crypto_fl_tab
13717@@ -70,6 +72,8 @@
13718 je B192; \
13719 leaq 32(r9),r9;
13720
13721+#define ret pax_force_retaddr; ret
13722+
13723 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13724 movq r1,r2; \
13725 movq r3,r4; \
13726diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13727index 477e9d7..c92c7d8 100644
13728--- a/arch/x86/crypto/aesni-intel_asm.S
13729+++ b/arch/x86/crypto/aesni-intel_asm.S
13730@@ -31,6 +31,7 @@
13731
13732 #include <linux/linkage.h>
13733 #include <asm/inst.h>
13734+#include <asm/alternative-asm.h>
13735
13736 #ifdef __x86_64__
13737 .data
13738@@ -205,7 +206,7 @@ enc: .octa 0x2
13739 * num_initial_blocks = b mod 4
13740 * encrypt the initial num_initial_blocks blocks and apply ghash on
13741 * the ciphertext
13742-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13743+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13744 * are clobbered
13745 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13746 */
13747@@ -214,8 +215,8 @@ enc: .octa 0x2
13748 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13749 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13750 mov arg7, %r10 # %r10 = AAD
13751- mov arg8, %r12 # %r12 = aadLen
13752- mov %r12, %r11
13753+ mov arg8, %r15 # %r15 = aadLen
13754+ mov %r15, %r11
13755 pxor %xmm\i, %xmm\i
13756 _get_AAD_loop\num_initial_blocks\operation:
13757 movd (%r10), \TMP1
13758@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13759 psrldq $4, %xmm\i
13760 pxor \TMP1, %xmm\i
13761 add $4, %r10
13762- sub $4, %r12
13763+ sub $4, %r15
13764 jne _get_AAD_loop\num_initial_blocks\operation
13765 cmp $16, %r11
13766 je _get_AAD_loop2_done\num_initial_blocks\operation
13767- mov $16, %r12
13768+ mov $16, %r15
13769 _get_AAD_loop2\num_initial_blocks\operation:
13770 psrldq $4, %xmm\i
13771- sub $4, %r12
13772- cmp %r11, %r12
13773+ sub $4, %r15
13774+ cmp %r11, %r15
13775 jne _get_AAD_loop2\num_initial_blocks\operation
13776 _get_AAD_loop2_done\num_initial_blocks\operation:
13777 movdqa SHUF_MASK(%rip), %xmm14
13778@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13779 * num_initial_blocks = b mod 4
13780 * encrypt the initial num_initial_blocks blocks and apply ghash on
13781 * the ciphertext
13782-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13783+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13784 * are clobbered
13785 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13786 */
13787@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13788 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13789 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13790 mov arg7, %r10 # %r10 = AAD
13791- mov arg8, %r12 # %r12 = aadLen
13792- mov %r12, %r11
13793+ mov arg8, %r15 # %r15 = aadLen
13794+ mov %r15, %r11
13795 pxor %xmm\i, %xmm\i
13796 _get_AAD_loop\num_initial_blocks\operation:
13797 movd (%r10), \TMP1
13798@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13799 psrldq $4, %xmm\i
13800 pxor \TMP1, %xmm\i
13801 add $4, %r10
13802- sub $4, %r12
13803+ sub $4, %r15
13804 jne _get_AAD_loop\num_initial_blocks\operation
13805 cmp $16, %r11
13806 je _get_AAD_loop2_done\num_initial_blocks\operation
13807- mov $16, %r12
13808+ mov $16, %r15
13809 _get_AAD_loop2\num_initial_blocks\operation:
13810 psrldq $4, %xmm\i
13811- sub $4, %r12
13812- cmp %r11, %r12
13813+ sub $4, %r15
13814+ cmp %r11, %r15
13815 jne _get_AAD_loop2\num_initial_blocks\operation
13816 _get_AAD_loop2_done\num_initial_blocks\operation:
13817 movdqa SHUF_MASK(%rip), %xmm14
13818@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13819 *
13820 *****************************************************************************/
13821 ENTRY(aesni_gcm_dec)
13822- push %r12
13823+ push %r15
13824 push %r13
13825 push %r14
13826 mov %rsp, %r14
13827@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13828 */
13829 sub $VARIABLE_OFFSET, %rsp
13830 and $~63, %rsp # align rsp to 64 bytes
13831- mov %arg6, %r12
13832- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13833+ mov %arg6, %r15
13834+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13835 movdqa SHUF_MASK(%rip), %xmm2
13836 PSHUFB_XMM %xmm2, %xmm13
13837
13838@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13839 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13840 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13841 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13842- mov %r13, %r12
13843- and $(3<<4), %r12
13844+ mov %r13, %r15
13845+ and $(3<<4), %r15
13846 jz _initial_num_blocks_is_0_decrypt
13847- cmp $(2<<4), %r12
13848+ cmp $(2<<4), %r15
13849 jb _initial_num_blocks_is_1_decrypt
13850 je _initial_num_blocks_is_2_decrypt
13851 _initial_num_blocks_is_3_decrypt:
13852@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13853 sub $16, %r11
13854 add %r13, %r11
13855 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13856- lea SHIFT_MASK+16(%rip), %r12
13857- sub %r13, %r12
13858+ lea SHIFT_MASK+16(%rip), %r15
13859+ sub %r13, %r15
13860 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13861 # (%r13 is the number of bytes in plaintext mod 16)
13862- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13863+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13864 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13865
13866 movdqa %xmm1, %xmm2
13867 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13868- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13869+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13870 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13871 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13872 pand %xmm1, %xmm2
13873@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13874 sub $1, %r13
13875 jne _less_than_8_bytes_left_decrypt
13876 _multiple_of_16_bytes_decrypt:
13877- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13878- shl $3, %r12 # convert into number of bits
13879- movd %r12d, %xmm15 # len(A) in %xmm15
13880+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13881+ shl $3, %r15 # convert into number of bits
13882+ movd %r15d, %xmm15 # len(A) in %xmm15
13883 shl $3, %arg4 # len(C) in bits (*128)
13884 MOVQ_R64_XMM %arg4, %xmm1
13885 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13886@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13887 mov %r14, %rsp
13888 pop %r14
13889 pop %r13
13890- pop %r12
13891+ pop %r15
13892+ pax_force_retaddr
13893 ret
13894 ENDPROC(aesni_gcm_dec)
13895
13896@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13897 * poly = x^128 + x^127 + x^126 + x^121 + 1
13898 ***************************************************************************/
13899 ENTRY(aesni_gcm_enc)
13900- push %r12
13901+ push %r15
13902 push %r13
13903 push %r14
13904 mov %rsp, %r14
13905@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13906 #
13907 sub $VARIABLE_OFFSET, %rsp
13908 and $~63, %rsp
13909- mov %arg6, %r12
13910- movdqu (%r12), %xmm13
13911+ mov %arg6, %r15
13912+ movdqu (%r15), %xmm13
13913 movdqa SHUF_MASK(%rip), %xmm2
13914 PSHUFB_XMM %xmm2, %xmm13
13915
13916@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13917 movdqa %xmm13, HashKey(%rsp)
13918 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13919 and $-16, %r13
13920- mov %r13, %r12
13921+ mov %r13, %r15
13922
13923 # Encrypt first few blocks
13924
13925- and $(3<<4), %r12
13926+ and $(3<<4), %r15
13927 jz _initial_num_blocks_is_0_encrypt
13928- cmp $(2<<4), %r12
13929+ cmp $(2<<4), %r15
13930 jb _initial_num_blocks_is_1_encrypt
13931 je _initial_num_blocks_is_2_encrypt
13932 _initial_num_blocks_is_3_encrypt:
13933@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13934 sub $16, %r11
13935 add %r13, %r11
13936 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13937- lea SHIFT_MASK+16(%rip), %r12
13938- sub %r13, %r12
13939+ lea SHIFT_MASK+16(%rip), %r15
13940+ sub %r13, %r15
13941 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13942 # (%r13 is the number of bytes in plaintext mod 16)
13943- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13944+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13945 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13946 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13947- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13948+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13949 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13950 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13951 movdqa SHUF_MASK(%rip), %xmm10
13952@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13953 sub $1, %r13
13954 jne _less_than_8_bytes_left_encrypt
13955 _multiple_of_16_bytes_encrypt:
13956- mov arg8, %r12 # %r12 = addLen (number of bytes)
13957- shl $3, %r12
13958- movd %r12d, %xmm15 # len(A) in %xmm15
13959+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13960+ shl $3, %r15
13961+ movd %r15d, %xmm15 # len(A) in %xmm15
13962 shl $3, %arg4 # len(C) in bits (*128)
13963 MOVQ_R64_XMM %arg4, %xmm1
13964 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13965@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13966 mov %r14, %rsp
13967 pop %r14
13968 pop %r13
13969- pop %r12
13970+ pop %r15
13971+ pax_force_retaddr
13972 ret
13973 ENDPROC(aesni_gcm_enc)
13974
13975@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13976 pxor %xmm1, %xmm0
13977 movaps %xmm0, (TKEYP)
13978 add $0x10, TKEYP
13979+ pax_force_retaddr
13980 ret
13981 ENDPROC(_key_expansion_128)
13982 ENDPROC(_key_expansion_256a)
13983@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13984 shufps $0b01001110, %xmm2, %xmm1
13985 movaps %xmm1, 0x10(TKEYP)
13986 add $0x20, TKEYP
13987+ pax_force_retaddr
13988 ret
13989 ENDPROC(_key_expansion_192a)
13990
13991@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13992
13993 movaps %xmm0, (TKEYP)
13994 add $0x10, TKEYP
13995+ pax_force_retaddr
13996 ret
13997 ENDPROC(_key_expansion_192b)
13998
13999@@ -1781,6 +1787,7 @@ _key_expansion_256b:
14000 pxor %xmm1, %xmm2
14001 movaps %xmm2, (TKEYP)
14002 add $0x10, TKEYP
14003+ pax_force_retaddr
14004 ret
14005 ENDPROC(_key_expansion_256b)
14006
14007@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
14008 #ifndef __x86_64__
14009 popl KEYP
14010 #endif
14011+ pax_force_retaddr
14012 ret
14013 ENDPROC(aesni_set_key)
14014
14015@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
14016 popl KLEN
14017 popl KEYP
14018 #endif
14019+ pax_force_retaddr
14020 ret
14021 ENDPROC(aesni_enc)
14022
14023@@ -1974,6 +1983,7 @@ _aesni_enc1:
14024 AESENC KEY STATE
14025 movaps 0x70(TKEYP), KEY
14026 AESENCLAST KEY STATE
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(_aesni_enc1)
14030
14031@@ -2083,6 +2093,7 @@ _aesni_enc4:
14032 AESENCLAST KEY STATE2
14033 AESENCLAST KEY STATE3
14034 AESENCLAST KEY STATE4
14035+ pax_force_retaddr
14036 ret
14037 ENDPROC(_aesni_enc4)
14038
14039@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
14040 popl KLEN
14041 popl KEYP
14042 #endif
14043+ pax_force_retaddr
14044 ret
14045 ENDPROC(aesni_dec)
14046
14047@@ -2164,6 +2176,7 @@ _aesni_dec1:
14048 AESDEC KEY STATE
14049 movaps 0x70(TKEYP), KEY
14050 AESDECLAST KEY STATE
14051+ pax_force_retaddr
14052 ret
14053 ENDPROC(_aesni_dec1)
14054
14055@@ -2273,6 +2286,7 @@ _aesni_dec4:
14056 AESDECLAST KEY STATE2
14057 AESDECLAST KEY STATE3
14058 AESDECLAST KEY STATE4
14059+ pax_force_retaddr
14060 ret
14061 ENDPROC(_aesni_dec4)
14062
14063@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
14064 popl KEYP
14065 popl LEN
14066 #endif
14067+ pax_force_retaddr
14068 ret
14069 ENDPROC(aesni_ecb_enc)
14070
14071@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
14072 popl KEYP
14073 popl LEN
14074 #endif
14075+ pax_force_retaddr
14076 ret
14077 ENDPROC(aesni_ecb_dec)
14078
14079@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
14080 popl LEN
14081 popl IVP
14082 #endif
14083+ pax_force_retaddr
14084 ret
14085 ENDPROC(aesni_cbc_enc)
14086
14087@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
14088 popl LEN
14089 popl IVP
14090 #endif
14091+ pax_force_retaddr
14092 ret
14093 ENDPROC(aesni_cbc_dec)
14094
14095@@ -2550,6 +2568,7 @@ _aesni_inc_init:
14096 mov $1, TCTR_LOW
14097 MOVQ_R64_XMM TCTR_LOW INC
14098 MOVQ_R64_XMM CTR TCTR_LOW
14099+ pax_force_retaddr
14100 ret
14101 ENDPROC(_aesni_inc_init)
14102
14103@@ -2579,6 +2598,7 @@ _aesni_inc:
14104 .Linc_low:
14105 movaps CTR, IV
14106 PSHUFB_XMM BSWAP_MASK IV
14107+ pax_force_retaddr
14108 ret
14109 ENDPROC(_aesni_inc)
14110
14111@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
14112 .Lctr_enc_ret:
14113 movups IV, (IVP)
14114 .Lctr_enc_just_ret:
14115+ pax_force_retaddr
14116 ret
14117 ENDPROC(aesni_ctr_enc)
14118
14119@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
14120 pxor INC, STATE4
14121 movdqu STATE4, 0x70(OUTP)
14122
14123+ pax_force_retaddr
14124 ret
14125 ENDPROC(aesni_xts_crypt8)
14126
14127diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14128index 246c670..466e2d6 100644
14129--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
14130+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14131@@ -21,6 +21,7 @@
14132 */
14133
14134 #include <linux/linkage.h>
14135+#include <asm/alternative-asm.h>
14136
14137 .file "blowfish-x86_64-asm.S"
14138 .text
14139@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
14140 jnz .L__enc_xor;
14141
14142 write_block();
14143+ pax_force_retaddr
14144 ret;
14145 .L__enc_xor:
14146 xor_block();
14147+ pax_force_retaddr
14148 ret;
14149 ENDPROC(__blowfish_enc_blk)
14150
14151@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
14152
14153 movq %r11, %rbp;
14154
14155+ pax_force_retaddr
14156 ret;
14157 ENDPROC(blowfish_dec_blk)
14158
14159@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
14160
14161 popq %rbx;
14162 popq %rbp;
14163+ pax_force_retaddr
14164 ret;
14165
14166 .L__enc_xor4:
14167@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
14168
14169 popq %rbx;
14170 popq %rbp;
14171+ pax_force_retaddr
14172 ret;
14173 ENDPROC(__blowfish_enc_blk_4way)
14174
14175@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
14176 popq %rbx;
14177 popq %rbp;
14178
14179+ pax_force_retaddr
14180 ret;
14181 ENDPROC(blowfish_dec_blk_4way)
14182diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14183index ce71f92..1dce7ec 100644
14184--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14185+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14186@@ -16,6 +16,7 @@
14187 */
14188
14189 #include <linux/linkage.h>
14190+#include <asm/alternative-asm.h>
14191
14192 #define CAMELLIA_TABLE_BYTE_LEN 272
14193
14194@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14195 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
14196 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
14197 %rcx, (%r9));
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14201
14202@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14203 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
14204 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
14205 %rax, (%r9));
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14209
14210@@ -780,6 +783,7 @@ __camellia_enc_blk16:
14211 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14212 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
14213
14214+ pax_force_retaddr
14215 ret;
14216
14217 .align 8
14218@@ -865,6 +869,7 @@ __camellia_dec_blk16:
14219 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14220 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
14221
14222+ pax_force_retaddr
14223 ret;
14224
14225 .align 8
14226@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
14227 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14228 %xmm8, %rsi);
14229
14230+ pax_force_retaddr
14231 ret;
14232 ENDPROC(camellia_ecb_enc_16way)
14233
14234@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
14235 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14236 %xmm8, %rsi);
14237
14238+ pax_force_retaddr
14239 ret;
14240 ENDPROC(camellia_ecb_dec_16way)
14241
14242@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
14243 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14244 %xmm8, %rsi);
14245
14246+ pax_force_retaddr
14247 ret;
14248 ENDPROC(camellia_cbc_dec_16way)
14249
14250@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
14251 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14252 %xmm8, %rsi);
14253
14254+ pax_force_retaddr
14255 ret;
14256 ENDPROC(camellia_ctr_16way)
14257
14258@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
14259 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14260 %xmm8, %rsi);
14261
14262+ pax_force_retaddr
14263 ret;
14264 ENDPROC(camellia_xts_crypt_16way)
14265
14266diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14267index 0e0b886..5a3123c 100644
14268--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14269+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14270@@ -11,6 +11,7 @@
14271 */
14272
14273 #include <linux/linkage.h>
14274+#include <asm/alternative-asm.h>
14275
14276 #define CAMELLIA_TABLE_BYTE_LEN 272
14277
14278@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14279 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
14280 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
14281 %rcx, (%r9));
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14285
14286@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14287 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
14288 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
14289 %rax, (%r9));
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14293
14294@@ -820,6 +823,7 @@ __camellia_enc_blk32:
14295 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14296 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
14297
14298+ pax_force_retaddr
14299 ret;
14300
14301 .align 8
14302@@ -905,6 +909,7 @@ __camellia_dec_blk32:
14303 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14304 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
14305
14306+ pax_force_retaddr
14307 ret;
14308
14309 .align 8
14310@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
14311
14312 vzeroupper;
14313
14314+ pax_force_retaddr
14315 ret;
14316 ENDPROC(camellia_ecb_enc_32way)
14317
14318@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
14319
14320 vzeroupper;
14321
14322+ pax_force_retaddr
14323 ret;
14324 ENDPROC(camellia_ecb_dec_32way)
14325
14326@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
14327
14328 vzeroupper;
14329
14330+ pax_force_retaddr
14331 ret;
14332 ENDPROC(camellia_cbc_dec_32way)
14333
14334@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
14335
14336 vzeroupper;
14337
14338+ pax_force_retaddr
14339 ret;
14340 ENDPROC(camellia_ctr_32way)
14341
14342@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
14343
14344 vzeroupper;
14345
14346+ pax_force_retaddr
14347 ret;
14348 ENDPROC(camellia_xts_crypt_32way)
14349
14350diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
14351index 310319c..db3d7b5 100644
14352--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
14353+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
14354@@ -21,6 +21,7 @@
14355 */
14356
14357 #include <linux/linkage.h>
14358+#include <asm/alternative-asm.h>
14359
14360 .file "camellia-x86_64-asm_64.S"
14361 .text
14362@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
14363 enc_outunpack(mov, RT1);
14364
14365 movq RRBP, %rbp;
14366+ pax_force_retaddr
14367 ret;
14368
14369 .L__enc_xor:
14370 enc_outunpack(xor, RT1);
14371
14372 movq RRBP, %rbp;
14373+ pax_force_retaddr
14374 ret;
14375 ENDPROC(__camellia_enc_blk)
14376
14377@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
14378 dec_outunpack();
14379
14380 movq RRBP, %rbp;
14381+ pax_force_retaddr
14382 ret;
14383 ENDPROC(camellia_dec_blk)
14384
14385@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
14386
14387 movq RRBP, %rbp;
14388 popq %rbx;
14389+ pax_force_retaddr
14390 ret;
14391
14392 .L__enc2_xor:
14393@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
14394
14395 movq RRBP, %rbp;
14396 popq %rbx;
14397+ pax_force_retaddr
14398 ret;
14399 ENDPROC(__camellia_enc_blk_2way)
14400
14401@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
14402
14403 movq RRBP, %rbp;
14404 movq RXOR, %rbx;
14405+ pax_force_retaddr
14406 ret;
14407 ENDPROC(camellia_dec_blk_2way)
14408diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14409index c35fd5d..2d8c7db 100644
14410--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14411+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14412@@ -24,6 +24,7 @@
14413 */
14414
14415 #include <linux/linkage.h>
14416+#include <asm/alternative-asm.h>
14417
14418 .file "cast5-avx-x86_64-asm_64.S"
14419
14420@@ -281,6 +282,7 @@ __cast5_enc_blk16:
14421 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14422 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14423
14424+ pax_force_retaddr
14425 ret;
14426 ENDPROC(__cast5_enc_blk16)
14427
14428@@ -352,6 +354,7 @@ __cast5_dec_blk16:
14429 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14430 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14431
14432+ pax_force_retaddr
14433 ret;
14434
14435 .L__skip_dec:
14436@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
14437 vmovdqu RR4, (6*4*4)(%r11);
14438 vmovdqu RL4, (7*4*4)(%r11);
14439
14440+ pax_force_retaddr
14441 ret;
14442 ENDPROC(cast5_ecb_enc_16way)
14443
14444@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
14445 vmovdqu RR4, (6*4*4)(%r11);
14446 vmovdqu RL4, (7*4*4)(%r11);
14447
14448+ pax_force_retaddr
14449 ret;
14450 ENDPROC(cast5_ecb_dec_16way)
14451
14452@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14453 * %rdx: src
14454 */
14455
14456- pushq %r12;
14457+ pushq %r14;
14458
14459 movq %rsi, %r11;
14460- movq %rdx, %r12;
14461+ movq %rdx, %r14;
14462
14463 vmovdqu (0*16)(%rdx), RL1;
14464 vmovdqu (1*16)(%rdx), RR1;
14465@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14466 call __cast5_dec_blk16;
14467
14468 /* xor with src */
14469- vmovq (%r12), RX;
14470+ vmovq (%r14), RX;
14471 vpshufd $0x4f, RX, RX;
14472 vpxor RX, RR1, RR1;
14473- vpxor 0*16+8(%r12), RL1, RL1;
14474- vpxor 1*16+8(%r12), RR2, RR2;
14475- vpxor 2*16+8(%r12), RL2, RL2;
14476- vpxor 3*16+8(%r12), RR3, RR3;
14477- vpxor 4*16+8(%r12), RL3, RL3;
14478- vpxor 5*16+8(%r12), RR4, RR4;
14479- vpxor 6*16+8(%r12), RL4, RL4;
14480+ vpxor 0*16+8(%r14), RL1, RL1;
14481+ vpxor 1*16+8(%r14), RR2, RR2;
14482+ vpxor 2*16+8(%r14), RL2, RL2;
14483+ vpxor 3*16+8(%r14), RR3, RR3;
14484+ vpxor 4*16+8(%r14), RL3, RL3;
14485+ vpxor 5*16+8(%r14), RR4, RR4;
14486+ vpxor 6*16+8(%r14), RL4, RL4;
14487
14488 vmovdqu RR1, (0*16)(%r11);
14489 vmovdqu RL1, (1*16)(%r11);
14490@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14491 vmovdqu RR4, (6*16)(%r11);
14492 vmovdqu RL4, (7*16)(%r11);
14493
14494- popq %r12;
14495+ popq %r14;
14496
14497+ pax_force_retaddr
14498 ret;
14499 ENDPROC(cast5_cbc_dec_16way)
14500
14501@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14502 * %rcx: iv (big endian, 64bit)
14503 */
14504
14505- pushq %r12;
14506+ pushq %r14;
14507
14508 movq %rsi, %r11;
14509- movq %rdx, %r12;
14510+ movq %rdx, %r14;
14511
14512 vpcmpeqd RTMP, RTMP, RTMP;
14513 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14514@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14515 call __cast5_enc_blk16;
14516
14517 /* dst = src ^ iv */
14518- vpxor (0*16)(%r12), RR1, RR1;
14519- vpxor (1*16)(%r12), RL1, RL1;
14520- vpxor (2*16)(%r12), RR2, RR2;
14521- vpxor (3*16)(%r12), RL2, RL2;
14522- vpxor (4*16)(%r12), RR3, RR3;
14523- vpxor (5*16)(%r12), RL3, RL3;
14524- vpxor (6*16)(%r12), RR4, RR4;
14525- vpxor (7*16)(%r12), RL4, RL4;
14526+ vpxor (0*16)(%r14), RR1, RR1;
14527+ vpxor (1*16)(%r14), RL1, RL1;
14528+ vpxor (2*16)(%r14), RR2, RR2;
14529+ vpxor (3*16)(%r14), RL2, RL2;
14530+ vpxor (4*16)(%r14), RR3, RR3;
14531+ vpxor (5*16)(%r14), RL3, RL3;
14532+ vpxor (6*16)(%r14), RR4, RR4;
14533+ vpxor (7*16)(%r14), RL4, RL4;
14534 vmovdqu RR1, (0*16)(%r11);
14535 vmovdqu RL1, (1*16)(%r11);
14536 vmovdqu RR2, (2*16)(%r11);
14537@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14538 vmovdqu RR4, (6*16)(%r11);
14539 vmovdqu RL4, (7*16)(%r11);
14540
14541- popq %r12;
14542+ popq %r14;
14543
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(cast5_ctr_16way)
14547diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14548index e3531f8..e123f35 100644
14549--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14551@@ -24,6 +24,7 @@
14552 */
14553
14554 #include <linux/linkage.h>
14555+#include <asm/alternative-asm.h>
14556 #include "glue_helper-asm-avx.S"
14557
14558 .file "cast6-avx-x86_64-asm_64.S"
14559@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14560 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14561 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14562
14563+ pax_force_retaddr
14564 ret;
14565 ENDPROC(__cast6_enc_blk8)
14566
14567@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14568 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14569 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14570
14571+ pax_force_retaddr
14572 ret;
14573 ENDPROC(__cast6_dec_blk8)
14574
14575@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14576
14577 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14578
14579+ pax_force_retaddr
14580 ret;
14581 ENDPROC(cast6_ecb_enc_8way)
14582
14583@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14584
14585 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14586
14587+ pax_force_retaddr
14588 ret;
14589 ENDPROC(cast6_ecb_dec_8way)
14590
14591@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14592 * %rdx: src
14593 */
14594
14595- pushq %r12;
14596+ pushq %r14;
14597
14598 movq %rsi, %r11;
14599- movq %rdx, %r12;
14600+ movq %rdx, %r14;
14601
14602 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14603
14604 call __cast6_dec_blk8;
14605
14606- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14607+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14608
14609- popq %r12;
14610+ popq %r14;
14611
14612+ pax_force_retaddr
14613 ret;
14614 ENDPROC(cast6_cbc_dec_8way)
14615
14616@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14617 * %rcx: iv (little endian, 128bit)
14618 */
14619
14620- pushq %r12;
14621+ pushq %r14;
14622
14623 movq %rsi, %r11;
14624- movq %rdx, %r12;
14625+ movq %rdx, %r14;
14626
14627 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14628 RD2, RX, RKR, RKM);
14629
14630 call __cast6_enc_blk8;
14631
14632- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14633+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14634
14635- popq %r12;
14636+ popq %r14;
14637
14638+ pax_force_retaddr
14639 ret;
14640 ENDPROC(cast6_ctr_8way)
14641
14642@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14643 /* dst <= regs xor IVs(in dst) */
14644 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14645
14646+ pax_force_retaddr
14647 ret;
14648 ENDPROC(cast6_xts_enc_8way)
14649
14650@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14651 /* dst <= regs xor IVs(in dst) */
14652 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14653
14654+ pax_force_retaddr
14655 ret;
14656 ENDPROC(cast6_xts_dec_8way)
14657diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14658index 26d49eb..c0a8c84 100644
14659--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14660+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14661@@ -45,6 +45,7 @@
14662
14663 #include <asm/inst.h>
14664 #include <linux/linkage.h>
14665+#include <asm/alternative-asm.h>
14666
14667 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14668
14669@@ -309,6 +310,7 @@ do_return:
14670 popq %rsi
14671 popq %rdi
14672 popq %rbx
14673+ pax_force_retaddr
14674 ret
14675
14676 ################################################################
14677diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14678index 5d1e007..098cb4f 100644
14679--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14680+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14681@@ -18,6 +18,7 @@
14682
14683 #include <linux/linkage.h>
14684 #include <asm/inst.h>
14685+#include <asm/alternative-asm.h>
14686
14687 .data
14688
14689@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14690 psrlq $1, T2
14691 pxor T2, T1
14692 pxor T1, DATA
14693+ pax_force_retaddr
14694 ret
14695 ENDPROC(__clmul_gf128mul_ble)
14696
14697@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14698 call __clmul_gf128mul_ble
14699 PSHUFB_XMM BSWAP DATA
14700 movups DATA, (%rdi)
14701+ pax_force_retaddr
14702 ret
14703 ENDPROC(clmul_ghash_mul)
14704
14705@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14706 PSHUFB_XMM BSWAP DATA
14707 movups DATA, (%rdi)
14708 .Lupdate_just_ret:
14709+ pax_force_retaddr
14710 ret
14711 ENDPROC(clmul_ghash_update)
14712diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14713index 9279e0b..c4b3d2c 100644
14714--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14715+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14716@@ -1,4 +1,5 @@
14717 #include <linux/linkage.h>
14718+#include <asm/alternative-asm.h>
14719
14720 # enter salsa20_encrypt_bytes
14721 ENTRY(salsa20_encrypt_bytes)
14722@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14723 add %r11,%rsp
14724 mov %rdi,%rax
14725 mov %rsi,%rdx
14726+ pax_force_retaddr
14727 ret
14728 # bytesatleast65:
14729 ._bytesatleast65:
14730@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14731 add %r11,%rsp
14732 mov %rdi,%rax
14733 mov %rsi,%rdx
14734+ pax_force_retaddr
14735 ret
14736 ENDPROC(salsa20_keysetup)
14737
14738@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14739 add %r11,%rsp
14740 mov %rdi,%rax
14741 mov %rsi,%rdx
14742+ pax_force_retaddr
14743 ret
14744 ENDPROC(salsa20_ivsetup)
14745diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14746index 2f202f4..d9164d6 100644
14747--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14748+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14749@@ -24,6 +24,7 @@
14750 */
14751
14752 #include <linux/linkage.h>
14753+#include <asm/alternative-asm.h>
14754 #include "glue_helper-asm-avx.S"
14755
14756 .file "serpent-avx-x86_64-asm_64.S"
14757@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14758 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14759 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14760
14761+ pax_force_retaddr
14762 ret;
14763 ENDPROC(__serpent_enc_blk8_avx)
14764
14765@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14766 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14767 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14768
14769+ pax_force_retaddr
14770 ret;
14771 ENDPROC(__serpent_dec_blk8_avx)
14772
14773@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14774
14775 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14776
14777+ pax_force_retaddr
14778 ret;
14779 ENDPROC(serpent_ecb_enc_8way_avx)
14780
14781@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14782
14783 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14784
14785+ pax_force_retaddr
14786 ret;
14787 ENDPROC(serpent_ecb_dec_8way_avx)
14788
14789@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14790
14791 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14792
14793+ pax_force_retaddr
14794 ret;
14795 ENDPROC(serpent_cbc_dec_8way_avx)
14796
14797@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14798
14799 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14800
14801+ pax_force_retaddr
14802 ret;
14803 ENDPROC(serpent_ctr_8way_avx)
14804
14805@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14806 /* dst <= regs xor IVs(in dst) */
14807 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14808
14809+ pax_force_retaddr
14810 ret;
14811 ENDPROC(serpent_xts_enc_8way_avx)
14812
14813@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14814 /* dst <= regs xor IVs(in dst) */
14815 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14816
14817+ pax_force_retaddr
14818 ret;
14819 ENDPROC(serpent_xts_dec_8way_avx)
14820diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14821index b222085..abd483c 100644
14822--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14823+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14824@@ -15,6 +15,7 @@
14825 */
14826
14827 #include <linux/linkage.h>
14828+#include <asm/alternative-asm.h>
14829 #include "glue_helper-asm-avx2.S"
14830
14831 .file "serpent-avx2-asm_64.S"
14832@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14833 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14834 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14835
14836+ pax_force_retaddr
14837 ret;
14838 ENDPROC(__serpent_enc_blk16)
14839
14840@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14841 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14842 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14843
14844+ pax_force_retaddr
14845 ret;
14846 ENDPROC(__serpent_dec_blk16)
14847
14848@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14849
14850 vzeroupper;
14851
14852+ pax_force_retaddr
14853 ret;
14854 ENDPROC(serpent_ecb_enc_16way)
14855
14856@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14857
14858 vzeroupper;
14859
14860+ pax_force_retaddr
14861 ret;
14862 ENDPROC(serpent_ecb_dec_16way)
14863
14864@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14865
14866 vzeroupper;
14867
14868+ pax_force_retaddr
14869 ret;
14870 ENDPROC(serpent_cbc_dec_16way)
14871
14872@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14873
14874 vzeroupper;
14875
14876+ pax_force_retaddr
14877 ret;
14878 ENDPROC(serpent_ctr_16way)
14879
14880@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14881
14882 vzeroupper;
14883
14884+ pax_force_retaddr
14885 ret;
14886 ENDPROC(serpent_xts_enc_16way)
14887
14888@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14889
14890 vzeroupper;
14891
14892+ pax_force_retaddr
14893 ret;
14894 ENDPROC(serpent_xts_dec_16way)
14895diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14896index acc066c..1559cc4 100644
14897--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14898+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14899@@ -25,6 +25,7 @@
14900 */
14901
14902 #include <linux/linkage.h>
14903+#include <asm/alternative-asm.h>
14904
14905 .file "serpent-sse2-x86_64-asm_64.S"
14906 .text
14907@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14908 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14909 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14910
14911+ pax_force_retaddr
14912 ret;
14913
14914 .L__enc_xor8:
14915 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14916 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14917
14918+ pax_force_retaddr
14919 ret;
14920 ENDPROC(__serpent_enc_blk_8way)
14921
14922@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14923 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14924 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14925
14926+ pax_force_retaddr
14927 ret;
14928 ENDPROC(serpent_dec_blk_8way)
14929diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14930index a410950..9dfe7ad 100644
14931--- a/arch/x86/crypto/sha1_ssse3_asm.S
14932+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14933@@ -29,6 +29,7 @@
14934 */
14935
14936 #include <linux/linkage.h>
14937+#include <asm/alternative-asm.h>
14938
14939 #define CTX %rdi // arg1
14940 #define BUF %rsi // arg2
14941@@ -75,9 +76,9 @@
14942
14943 push %rbx
14944 push %rbp
14945- push %r12
14946+ push %r14
14947
14948- mov %rsp, %r12
14949+ mov %rsp, %r14
14950 sub $64, %rsp # allocate workspace
14951 and $~15, %rsp # align stack
14952
14953@@ -99,11 +100,12 @@
14954 xor %rax, %rax
14955 rep stosq
14956
14957- mov %r12, %rsp # deallocate workspace
14958+ mov %r14, %rsp # deallocate workspace
14959
14960- pop %r12
14961+ pop %r14
14962 pop %rbp
14963 pop %rbx
14964+ pax_force_retaddr
14965 ret
14966
14967 ENDPROC(\name)
14968diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14969index 642f156..51a513c 100644
14970--- a/arch/x86/crypto/sha256-avx-asm.S
14971+++ b/arch/x86/crypto/sha256-avx-asm.S
14972@@ -49,6 +49,7 @@
14973
14974 #ifdef CONFIG_AS_AVX
14975 #include <linux/linkage.h>
14976+#include <asm/alternative-asm.h>
14977
14978 ## assume buffers not aligned
14979 #define VMOVDQ vmovdqu
14980@@ -460,6 +461,7 @@ done_hash:
14981 popq %r13
14982 popq %rbp
14983 popq %rbx
14984+ pax_force_retaddr
14985 ret
14986 ENDPROC(sha256_transform_avx)
14987
14988diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14989index 9e86944..3795e6a 100644
14990--- a/arch/x86/crypto/sha256-avx2-asm.S
14991+++ b/arch/x86/crypto/sha256-avx2-asm.S
14992@@ -50,6 +50,7 @@
14993
14994 #ifdef CONFIG_AS_AVX2
14995 #include <linux/linkage.h>
14996+#include <asm/alternative-asm.h>
14997
14998 ## assume buffers not aligned
14999 #define VMOVDQ vmovdqu
15000@@ -720,6 +721,7 @@ done_hash:
15001 popq %r12
15002 popq %rbp
15003 popq %rbx
15004+ pax_force_retaddr
15005 ret
15006 ENDPROC(sha256_transform_rorx)
15007
15008diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
15009index f833b74..8c62a9e 100644
15010--- a/arch/x86/crypto/sha256-ssse3-asm.S
15011+++ b/arch/x86/crypto/sha256-ssse3-asm.S
15012@@ -47,6 +47,7 @@
15013 ########################################################################
15014
15015 #include <linux/linkage.h>
15016+#include <asm/alternative-asm.h>
15017
15018 ## assume buffers not aligned
15019 #define MOVDQ movdqu
15020@@ -471,6 +472,7 @@ done_hash:
15021 popq %rbp
15022 popq %rbx
15023
15024+ pax_force_retaddr
15025 ret
15026 ENDPROC(sha256_transform_ssse3)
15027
15028diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
15029index 974dde9..a823ff9 100644
15030--- a/arch/x86/crypto/sha512-avx-asm.S
15031+++ b/arch/x86/crypto/sha512-avx-asm.S
15032@@ -49,6 +49,7 @@
15033
15034 #ifdef CONFIG_AS_AVX
15035 #include <linux/linkage.h>
15036+#include <asm/alternative-asm.h>
15037
15038 .text
15039
15040@@ -364,6 +365,7 @@ updateblock:
15041 mov frame_RSPSAVE(%rsp), %rsp
15042
15043 nowork:
15044+ pax_force_retaddr
15045 ret
15046 ENDPROC(sha512_transform_avx)
15047
15048diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
15049index 568b961..ed20c37 100644
15050--- a/arch/x86/crypto/sha512-avx2-asm.S
15051+++ b/arch/x86/crypto/sha512-avx2-asm.S
15052@@ -51,6 +51,7 @@
15053
15054 #ifdef CONFIG_AS_AVX2
15055 #include <linux/linkage.h>
15056+#include <asm/alternative-asm.h>
15057
15058 .text
15059
15060@@ -678,6 +679,7 @@ done_hash:
15061
15062 # Restore Stack Pointer
15063 mov frame_RSPSAVE(%rsp), %rsp
15064+ pax_force_retaddr
15065 ret
15066 ENDPROC(sha512_transform_rorx)
15067
15068diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
15069index fb56855..6edd768 100644
15070--- a/arch/x86/crypto/sha512-ssse3-asm.S
15071+++ b/arch/x86/crypto/sha512-ssse3-asm.S
15072@@ -48,6 +48,7 @@
15073 ########################################################################
15074
15075 #include <linux/linkage.h>
15076+#include <asm/alternative-asm.h>
15077
15078 .text
15079
15080@@ -363,6 +364,7 @@ updateblock:
15081 mov frame_RSPSAVE(%rsp), %rsp
15082
15083 nowork:
15084+ pax_force_retaddr
15085 ret
15086 ENDPROC(sha512_transform_ssse3)
15087
15088diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15089index 0505813..b067311 100644
15090--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15091+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15092@@ -24,6 +24,7 @@
15093 */
15094
15095 #include <linux/linkage.h>
15096+#include <asm/alternative-asm.h>
15097 #include "glue_helper-asm-avx.S"
15098
15099 .file "twofish-avx-x86_64-asm_64.S"
15100@@ -284,6 +285,7 @@ __twofish_enc_blk8:
15101 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
15102 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
15103
15104+ pax_force_retaddr
15105 ret;
15106 ENDPROC(__twofish_enc_blk8)
15107
15108@@ -324,6 +326,7 @@ __twofish_dec_blk8:
15109 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
15110 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
15111
15112+ pax_force_retaddr
15113 ret;
15114 ENDPROC(__twofish_dec_blk8)
15115
15116@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
15117
15118 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15119
15120+ pax_force_retaddr
15121 ret;
15122 ENDPROC(twofish_ecb_enc_8way)
15123
15124@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
15125
15126 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15127
15128+ pax_force_retaddr
15129 ret;
15130 ENDPROC(twofish_ecb_dec_8way)
15131
15132@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
15133 * %rdx: src
15134 */
15135
15136- pushq %r12;
15137+ pushq %r14;
15138
15139 movq %rsi, %r11;
15140- movq %rdx, %r12;
15141+ movq %rdx, %r14;
15142
15143 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15144
15145 call __twofish_dec_blk8;
15146
15147- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15148+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15149
15150- popq %r12;
15151+ popq %r14;
15152
15153+ pax_force_retaddr
15154 ret;
15155 ENDPROC(twofish_cbc_dec_8way)
15156
15157@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
15158 * %rcx: iv (little endian, 128bit)
15159 */
15160
15161- pushq %r12;
15162+ pushq %r14;
15163
15164 movq %rsi, %r11;
15165- movq %rdx, %r12;
15166+ movq %rdx, %r14;
15167
15168 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
15169 RD2, RX0, RX1, RY0);
15170
15171 call __twofish_enc_blk8;
15172
15173- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15174+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15175
15176- popq %r12;
15177+ popq %r14;
15178
15179+ pax_force_retaddr
15180 ret;
15181 ENDPROC(twofish_ctr_8way)
15182
15183@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
15184 /* dst <= regs xor IVs(in dst) */
15185 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15186
15187+ pax_force_retaddr
15188 ret;
15189 ENDPROC(twofish_xts_enc_8way)
15190
15191@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
15192 /* dst <= regs xor IVs(in dst) */
15193 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15194
15195+ pax_force_retaddr
15196 ret;
15197 ENDPROC(twofish_xts_dec_8way)
15198diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15199index 1c3b7ce..02f578d 100644
15200--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15201+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15202@@ -21,6 +21,7 @@
15203 */
15204
15205 #include <linux/linkage.h>
15206+#include <asm/alternative-asm.h>
15207
15208 .file "twofish-x86_64-asm-3way.S"
15209 .text
15210@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
15211 popq %r13;
15212 popq %r14;
15213 popq %r15;
15214+ pax_force_retaddr
15215 ret;
15216
15217 .L__enc_xor3:
15218@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
15219 popq %r13;
15220 popq %r14;
15221 popq %r15;
15222+ pax_force_retaddr
15223 ret;
15224 ENDPROC(__twofish_enc_blk_3way)
15225
15226@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
15227 popq %r13;
15228 popq %r14;
15229 popq %r15;
15230+ pax_force_retaddr
15231 ret;
15232 ENDPROC(twofish_dec_blk_3way)
15233diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
15234index a039d21..524b8b2 100644
15235--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
15236+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
15237@@ -22,6 +22,7 @@
15238
15239 #include <linux/linkage.h>
15240 #include <asm/asm-offsets.h>
15241+#include <asm/alternative-asm.h>
15242
15243 #define a_offset 0
15244 #define b_offset 4
15245@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
15246
15247 popq R1
15248 movq $1,%rax
15249+ pax_force_retaddr
15250 ret
15251 ENDPROC(twofish_enc_blk)
15252
15253@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
15254
15255 popq R1
15256 movq $1,%rax
15257+ pax_force_retaddr
15258 ret
15259 ENDPROC(twofish_dec_blk)
15260diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
15261index d21ff89..6da8e6e 100644
15262--- a/arch/x86/ia32/ia32_aout.c
15263+++ b/arch/x86/ia32/ia32_aout.c
15264@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
15265 unsigned long dump_start, dump_size;
15266 struct user32 dump;
15267
15268+ memset(&dump, 0, sizeof(dump));
15269+
15270 fs = get_fs();
15271 set_fs(KERNEL_DS);
15272 has_dumped = 1;
15273diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
15274index f9e181a..db313b5 100644
15275--- a/arch/x86/ia32/ia32_signal.c
15276+++ b/arch/x86/ia32/ia32_signal.c
15277@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
15278 if (__get_user(set.sig[0], &frame->sc.oldmask)
15279 || (_COMPAT_NSIG_WORDS > 1
15280 && __copy_from_user((((char *) &set.sig) + 4),
15281- &frame->extramask,
15282+ frame->extramask,
15283 sizeof(frame->extramask))))
15284 goto badframe;
15285
15286@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
15287 sp -= frame_size;
15288 /* Align the stack pointer according to the i386 ABI,
15289 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
15290- sp = ((sp + 4) & -16ul) - 4;
15291+ sp = ((sp - 12) & -16ul) - 4;
15292 return (void __user *) sp;
15293 }
15294
15295@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15296 } else {
15297 /* Return stub is in 32bit vsyscall page */
15298 if (current->mm->context.vdso)
15299- restorer = current->mm->context.vdso +
15300- selected_vdso32->sym___kernel_sigreturn;
15301+ restorer = (void __force_user *)(current->mm->context.vdso +
15302+ selected_vdso32->sym___kernel_sigreturn);
15303 else
15304- restorer = &frame->retcode;
15305+ restorer = frame->retcode;
15306 }
15307
15308 put_user_try {
15309@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15310 * These are actually not used anymore, but left because some
15311 * gdb versions depend on them as a marker.
15312 */
15313- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15314+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15315 } put_user_catch(err);
15316
15317 if (err)
15318@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15319 0xb8,
15320 __NR_ia32_rt_sigreturn,
15321 0x80cd,
15322- 0,
15323+ 0
15324 };
15325
15326 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
15327@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15328
15329 if (ksig->ka.sa.sa_flags & SA_RESTORER)
15330 restorer = ksig->ka.sa.sa_restorer;
15331- else
15332+ else if (current->mm->context.vdso)
15333+ /* Return stub is in 32bit vsyscall page */
15334 restorer = current->mm->context.vdso +
15335 selected_vdso32->sym___kernel_rt_sigreturn;
15336+ else
15337+ restorer = frame->retcode;
15338 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
15339
15340 /*
15341 * Not actually used anymore, but left because some gdb
15342 * versions need it.
15343 */
15344- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15345+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15346 } put_user_catch(err);
15347
15348 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
15349diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
15350index 4299eb0..fefe70e 100644
15351--- a/arch/x86/ia32/ia32entry.S
15352+++ b/arch/x86/ia32/ia32entry.S
15353@@ -15,8 +15,10 @@
15354 #include <asm/irqflags.h>
15355 #include <asm/asm.h>
15356 #include <asm/smap.h>
15357+#include <asm/pgtable.h>
15358 #include <linux/linkage.h>
15359 #include <linux/err.h>
15360+#include <asm/alternative-asm.h>
15361
15362 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15363 #include <linux/elf-em.h>
15364@@ -62,12 +64,12 @@
15365 */
15366 .macro LOAD_ARGS32 offset, _r9=0
15367 .if \_r9
15368- movl \offset+16(%rsp),%r9d
15369+ movl \offset+R9(%rsp),%r9d
15370 .endif
15371- movl \offset+40(%rsp),%ecx
15372- movl \offset+48(%rsp),%edx
15373- movl \offset+56(%rsp),%esi
15374- movl \offset+64(%rsp),%edi
15375+ movl \offset+RCX(%rsp),%ecx
15376+ movl \offset+RDX(%rsp),%edx
15377+ movl \offset+RSI(%rsp),%esi
15378+ movl \offset+RDI(%rsp),%edi
15379 movl %eax,%eax /* zero extension */
15380 .endm
15381
15382@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
15383 ENDPROC(native_irq_enable_sysexit)
15384 #endif
15385
15386+ .macro pax_enter_kernel_user
15387+ pax_set_fptr_mask
15388+#ifdef CONFIG_PAX_MEMORY_UDEREF
15389+ call pax_enter_kernel_user
15390+#endif
15391+ .endm
15392+
15393+ .macro pax_exit_kernel_user
15394+#ifdef CONFIG_PAX_MEMORY_UDEREF
15395+ call pax_exit_kernel_user
15396+#endif
15397+#ifdef CONFIG_PAX_RANDKSTACK
15398+ pushq %rax
15399+ pushq %r11
15400+ call pax_randomize_kstack
15401+ popq %r11
15402+ popq %rax
15403+#endif
15404+ .endm
15405+
15406+ .macro pax_erase_kstack
15407+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15408+ call pax_erase_kstack
15409+#endif
15410+ .endm
15411+
15412 /*
15413 * 32bit SYSENTER instruction entry.
15414 *
15415@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
15416 CFI_REGISTER rsp,rbp
15417 SWAPGS_UNSAFE_STACK
15418 movq PER_CPU_VAR(kernel_stack), %rsp
15419- addq $(KERNEL_STACK_OFFSET),%rsp
15420- /*
15421- * No need to follow this irqs on/off section: the syscall
15422- * disabled irqs, here we enable it straight after entry:
15423- */
15424- ENABLE_INTERRUPTS(CLBR_NONE)
15425 movl %ebp,%ebp /* zero extension */
15426 pushq_cfi $__USER32_DS
15427 /*CFI_REL_OFFSET ss,0*/
15428@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
15429 CFI_REL_OFFSET rsp,0
15430 pushfq_cfi
15431 /*CFI_REL_OFFSET rflags,0*/
15432- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
15433- CFI_REGISTER rip,r10
15434+ orl $X86_EFLAGS_IF,(%rsp)
15435+ GET_THREAD_INFO(%r11)
15436+ movl TI_sysenter_return(%r11), %r11d
15437+ CFI_REGISTER rip,r11
15438 pushq_cfi $__USER32_CS
15439 /*CFI_REL_OFFSET cs,0*/
15440 movl %eax, %eax
15441- pushq_cfi %r10
15442+ pushq_cfi %r11
15443 CFI_REL_OFFSET rip,0
15444 pushq_cfi %rax
15445 cld
15446 SAVE_ARGS 0,1,0
15447+ pax_enter_kernel_user
15448+
15449+#ifdef CONFIG_PAX_RANDKSTACK
15450+ pax_erase_kstack
15451+#endif
15452+
15453+ /*
15454+ * No need to follow this irqs on/off section: the syscall
15455+ * disabled irqs, here we enable it straight after entry:
15456+ */
15457+ ENABLE_INTERRUPTS(CLBR_NONE)
15458 /* no need to do an access_ok check here because rbp has been
15459 32bit zero extended */
15460+
15461+#ifdef CONFIG_PAX_MEMORY_UDEREF
15462+ addq pax_user_shadow_base,%rbp
15463+ ASM_PAX_OPEN_USERLAND
15464+#endif
15465+
15466 ASM_STAC
15467 1: movl (%rbp),%ebp
15468 _ASM_EXTABLE(1b,ia32_badarg)
15469 ASM_CLAC
15470- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15471- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15472+
15473+#ifdef CONFIG_PAX_MEMORY_UDEREF
15474+ ASM_PAX_CLOSE_USERLAND
15475+#endif
15476+
15477+ GET_THREAD_INFO(%r11)
15478+ orl $TS_COMPAT,TI_status(%r11)
15479+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15480 CFI_REMEMBER_STATE
15481 jnz sysenter_tracesys
15482 cmpq $(IA32_NR_syscalls-1),%rax
15483@@ -162,15 +209,18 @@ sysenter_do_call:
15484 sysenter_dispatch:
15485 call *ia32_sys_call_table(,%rax,8)
15486 movq %rax,RAX-ARGOFFSET(%rsp)
15487+ GET_THREAD_INFO(%r11)
15488 DISABLE_INTERRUPTS(CLBR_NONE)
15489 TRACE_IRQS_OFF
15490- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15491+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15492 jnz sysexit_audit
15493 sysexit_from_sys_call:
15494- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15495+ pax_exit_kernel_user
15496+ pax_erase_kstack
15497+ andl $~TS_COMPAT,TI_status(%r11)
15498 /* clear IF, that popfq doesn't enable interrupts early */
15499- andl $~0x200,EFLAGS-R11(%rsp)
15500- movl RIP-R11(%rsp),%edx /* User %eip */
15501+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15502+ movl RIP(%rsp),%edx /* User %eip */
15503 CFI_REGISTER rip,rdx
15504 RESTORE_ARGS 0,24,0,0,0,0
15505 xorq %r8,%r8
15506@@ -193,6 +243,9 @@ sysexit_from_sys_call:
15507 movl %eax,%esi /* 2nd arg: syscall number */
15508 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15509 call __audit_syscall_entry
15510+
15511+ pax_erase_kstack
15512+
15513 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15514 cmpq $(IA32_NR_syscalls-1),%rax
15515 ja ia32_badsys
15516@@ -204,7 +257,7 @@ sysexit_from_sys_call:
15517 .endm
15518
15519 .macro auditsys_exit exit
15520- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15521+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15522 jnz ia32_ret_from_sys_call
15523 TRACE_IRQS_ON
15524 ENABLE_INTERRUPTS(CLBR_NONE)
15525@@ -215,11 +268,12 @@ sysexit_from_sys_call:
15526 1: setbe %al /* 1 if error, 0 if not */
15527 movzbl %al,%edi /* zero-extend that into %edi */
15528 call __audit_syscall_exit
15529+ GET_THREAD_INFO(%r11)
15530 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15531 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15532 DISABLE_INTERRUPTS(CLBR_NONE)
15533 TRACE_IRQS_OFF
15534- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15535+ testl %edi,TI_flags(%r11)
15536 jz \exit
15537 CLEAR_RREGS -ARGOFFSET
15538 jmp int_with_check
15539@@ -237,7 +291,7 @@ sysexit_audit:
15540
15541 sysenter_tracesys:
15542 #ifdef CONFIG_AUDITSYSCALL
15543- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15544+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15545 jz sysenter_auditsys
15546 #endif
15547 SAVE_REST
15548@@ -249,6 +303,9 @@ sysenter_tracesys:
15549 RESTORE_REST
15550 cmpq $(IA32_NR_syscalls-1),%rax
15551 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15552+
15553+ pax_erase_kstack
15554+
15555 jmp sysenter_do_call
15556 CFI_ENDPROC
15557 ENDPROC(ia32_sysenter_target)
15558@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
15559 ENTRY(ia32_cstar_target)
15560 CFI_STARTPROC32 simple
15561 CFI_SIGNAL_FRAME
15562- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15563+ CFI_DEF_CFA rsp,0
15564 CFI_REGISTER rip,rcx
15565 /*CFI_REGISTER rflags,r11*/
15566 SWAPGS_UNSAFE_STACK
15567 movl %esp,%r8d
15568 CFI_REGISTER rsp,r8
15569 movq PER_CPU_VAR(kernel_stack),%rsp
15570+ SAVE_ARGS 8*6,0,0
15571+ pax_enter_kernel_user
15572+
15573+#ifdef CONFIG_PAX_RANDKSTACK
15574+ pax_erase_kstack
15575+#endif
15576+
15577 /*
15578 * No need to follow this irqs on/off section: the syscall
15579 * disabled irqs and here we enable it straight after entry:
15580 */
15581 ENABLE_INTERRUPTS(CLBR_NONE)
15582- SAVE_ARGS 8,0,0
15583 movl %eax,%eax /* zero extension */
15584 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15585 movq %rcx,RIP-ARGOFFSET(%rsp)
15586@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
15587 /* no need to do an access_ok check here because r8 has been
15588 32bit zero extended */
15589 /* hardware stack frame is complete now */
15590+
15591+#ifdef CONFIG_PAX_MEMORY_UDEREF
15592+ ASM_PAX_OPEN_USERLAND
15593+ movq pax_user_shadow_base,%r8
15594+ addq RSP-ARGOFFSET(%rsp),%r8
15595+#endif
15596+
15597 ASM_STAC
15598 1: movl (%r8),%r9d
15599 _ASM_EXTABLE(1b,ia32_badarg)
15600 ASM_CLAC
15601- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15602- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15603+
15604+#ifdef CONFIG_PAX_MEMORY_UDEREF
15605+ ASM_PAX_CLOSE_USERLAND
15606+#endif
15607+
15608+ GET_THREAD_INFO(%r11)
15609+ orl $TS_COMPAT,TI_status(%r11)
15610+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15611 CFI_REMEMBER_STATE
15612 jnz cstar_tracesys
15613 cmpq $IA32_NR_syscalls-1,%rax
15614@@ -319,13 +395,16 @@ cstar_do_call:
15615 cstar_dispatch:
15616 call *ia32_sys_call_table(,%rax,8)
15617 movq %rax,RAX-ARGOFFSET(%rsp)
15618+ GET_THREAD_INFO(%r11)
15619 DISABLE_INTERRUPTS(CLBR_NONE)
15620 TRACE_IRQS_OFF
15621- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15622+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15623 jnz sysretl_audit
15624 sysretl_from_sys_call:
15625- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15626- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15627+ pax_exit_kernel_user
15628+ pax_erase_kstack
15629+ andl $~TS_COMPAT,TI_status(%r11)
15630+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15631 movl RIP-ARGOFFSET(%rsp),%ecx
15632 CFI_REGISTER rip,rcx
15633 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15634@@ -352,7 +431,7 @@ sysretl_audit:
15635
15636 cstar_tracesys:
15637 #ifdef CONFIG_AUDITSYSCALL
15638- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15639+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15640 jz cstar_auditsys
15641 #endif
15642 xchgl %r9d,%ebp
15643@@ -366,11 +445,19 @@ cstar_tracesys:
15644 xchgl %ebp,%r9d
15645 cmpq $(IA32_NR_syscalls-1),%rax
15646 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15647+
15648+ pax_erase_kstack
15649+
15650 jmp cstar_do_call
15651 END(ia32_cstar_target)
15652
15653 ia32_badarg:
15654 ASM_CLAC
15655+
15656+#ifdef CONFIG_PAX_MEMORY_UDEREF
15657+ ASM_PAX_CLOSE_USERLAND
15658+#endif
15659+
15660 movq $-EFAULT,%rax
15661 jmp ia32_sysret
15662 CFI_ENDPROC
15663@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
15664 CFI_REL_OFFSET rip,RIP-RIP
15665 PARAVIRT_ADJUST_EXCEPTION_FRAME
15666 SWAPGS
15667- /*
15668- * No need to follow this irqs on/off section: the syscall
15669- * disabled irqs and here we enable it straight after entry:
15670- */
15671- ENABLE_INTERRUPTS(CLBR_NONE)
15672 movl %eax,%eax
15673 pushq_cfi %rax
15674 cld
15675 /* note the registers are not zero extended to the sf.
15676 this could be a problem. */
15677 SAVE_ARGS 0,1,0
15678- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15679- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15680+ pax_enter_kernel_user
15681+
15682+#ifdef CONFIG_PAX_RANDKSTACK
15683+ pax_erase_kstack
15684+#endif
15685+
15686+ /*
15687+ * No need to follow this irqs on/off section: the syscall
15688+ * disabled irqs and here we enable it straight after entry:
15689+ */
15690+ ENABLE_INTERRUPTS(CLBR_NONE)
15691+ GET_THREAD_INFO(%r11)
15692+ orl $TS_COMPAT,TI_status(%r11)
15693+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15694 jnz ia32_tracesys
15695 cmpq $(IA32_NR_syscalls-1),%rax
15696 ja ia32_badsys
15697@@ -442,6 +536,9 @@ ia32_tracesys:
15698 RESTORE_REST
15699 cmpq $(IA32_NR_syscalls-1),%rax
15700 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15701+
15702+ pax_erase_kstack
15703+
15704 jmp ia32_do_call
15705 END(ia32_syscall)
15706
15707diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15708index 8e0ceec..af13504 100644
15709--- a/arch/x86/ia32/sys_ia32.c
15710+++ b/arch/x86/ia32/sys_ia32.c
15711@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15712 */
15713 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15714 {
15715- typeof(ubuf->st_uid) uid = 0;
15716- typeof(ubuf->st_gid) gid = 0;
15717+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15718+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15719 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15720 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15721 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15722diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15723index 372231c..51b537d 100644
15724--- a/arch/x86/include/asm/alternative-asm.h
15725+++ b/arch/x86/include/asm/alternative-asm.h
15726@@ -18,6 +18,45 @@
15727 .endm
15728 #endif
15729
15730+#ifdef KERNEXEC_PLUGIN
15731+ .macro pax_force_retaddr_bts rip=0
15732+ btsq $63,\rip(%rsp)
15733+ .endm
15734+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15735+ .macro pax_force_retaddr rip=0, reload=0
15736+ btsq $63,\rip(%rsp)
15737+ .endm
15738+ .macro pax_force_fptr ptr
15739+ btsq $63,\ptr
15740+ .endm
15741+ .macro pax_set_fptr_mask
15742+ .endm
15743+#endif
15744+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15745+ .macro pax_force_retaddr rip=0, reload=0
15746+ .if \reload
15747+ pax_set_fptr_mask
15748+ .endif
15749+ orq %r12,\rip(%rsp)
15750+ .endm
15751+ .macro pax_force_fptr ptr
15752+ orq %r12,\ptr
15753+ .endm
15754+ .macro pax_set_fptr_mask
15755+ movabs $0x8000000000000000,%r12
15756+ .endm
15757+#endif
15758+#else
15759+ .macro pax_force_retaddr rip=0, reload=0
15760+ .endm
15761+ .macro pax_force_fptr ptr
15762+ .endm
15763+ .macro pax_force_retaddr_bts rip=0
15764+ .endm
15765+ .macro pax_set_fptr_mask
15766+ .endm
15767+#endif
15768+
15769 .macro altinstruction_entry orig alt feature orig_len alt_len
15770 .long \orig - .
15771 .long \alt - .
15772diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15773index 473bdbe..b1e3377 100644
15774--- a/arch/x86/include/asm/alternative.h
15775+++ b/arch/x86/include/asm/alternative.h
15776@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15777 ".pushsection .discard,\"aw\",@progbits\n" \
15778 DISCARD_ENTRY(1) \
15779 ".popsection\n" \
15780- ".pushsection .altinstr_replacement, \"ax\"\n" \
15781+ ".pushsection .altinstr_replacement, \"a\"\n" \
15782 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15783 ".popsection"
15784
15785@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15786 DISCARD_ENTRY(1) \
15787 DISCARD_ENTRY(2) \
15788 ".popsection\n" \
15789- ".pushsection .altinstr_replacement, \"ax\"\n" \
15790+ ".pushsection .altinstr_replacement, \"a\"\n" \
15791 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15792 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15793 ".popsection"
15794diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15795index 465b309..ab7e51f 100644
15796--- a/arch/x86/include/asm/apic.h
15797+++ b/arch/x86/include/asm/apic.h
15798@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15799
15800 #ifdef CONFIG_X86_LOCAL_APIC
15801
15802-extern unsigned int apic_verbosity;
15803+extern int apic_verbosity;
15804 extern int local_apic_timer_c2_ok;
15805
15806 extern int disable_apic;
15807diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15808index 20370c6..a2eb9b0 100644
15809--- a/arch/x86/include/asm/apm.h
15810+++ b/arch/x86/include/asm/apm.h
15811@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15812 __asm__ __volatile__(APM_DO_ZERO_SEGS
15813 "pushl %%edi\n\t"
15814 "pushl %%ebp\n\t"
15815- "lcall *%%cs:apm_bios_entry\n\t"
15816+ "lcall *%%ss:apm_bios_entry\n\t"
15817 "setc %%al\n\t"
15818 "popl %%ebp\n\t"
15819 "popl %%edi\n\t"
15820@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15821 __asm__ __volatile__(APM_DO_ZERO_SEGS
15822 "pushl %%edi\n\t"
15823 "pushl %%ebp\n\t"
15824- "lcall *%%cs:apm_bios_entry\n\t"
15825+ "lcall *%%ss:apm_bios_entry\n\t"
15826 "setc %%bl\n\t"
15827 "popl %%ebp\n\t"
15828 "popl %%edi\n\t"
15829diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15830index 6dd1c7dd..2edd216 100644
15831--- a/arch/x86/include/asm/atomic.h
15832+++ b/arch/x86/include/asm/atomic.h
15833@@ -24,7 +24,18 @@
15834 */
15835 static inline int atomic_read(const atomic_t *v)
15836 {
15837- return (*(volatile int *)&(v)->counter);
15838+ return (*(volatile const int *)&(v)->counter);
15839+}
15840+
15841+/**
15842+ * atomic_read_unchecked - read atomic variable
15843+ * @v: pointer of type atomic_unchecked_t
15844+ *
15845+ * Atomically reads the value of @v.
15846+ */
15847+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15848+{
15849+ return (*(volatile const int *)&(v)->counter);
15850 }
15851
15852 /**
15853@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15854 }
15855
15856 /**
15857+ * atomic_set_unchecked - set atomic variable
15858+ * @v: pointer of type atomic_unchecked_t
15859+ * @i: required value
15860+ *
15861+ * Atomically sets the value of @v to @i.
15862+ */
15863+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15864+{
15865+ v->counter = i;
15866+}
15867+
15868+/**
15869 * atomic_add - add integer to atomic variable
15870 * @i: integer value to add
15871 * @v: pointer of type atomic_t
15872@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15873 */
15874 static inline void atomic_add(int i, atomic_t *v)
15875 {
15876- asm volatile(LOCK_PREFIX "addl %1,%0"
15877+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15878+
15879+#ifdef CONFIG_PAX_REFCOUNT
15880+ "jno 0f\n"
15881+ LOCK_PREFIX "subl %1,%0\n"
15882+ "int $4\n0:\n"
15883+ _ASM_EXTABLE(0b, 0b)
15884+#endif
15885+
15886+ : "+m" (v->counter)
15887+ : "ir" (i));
15888+}
15889+
15890+/**
15891+ * atomic_add_unchecked - add integer to atomic variable
15892+ * @i: integer value to add
15893+ * @v: pointer of type atomic_unchecked_t
15894+ *
15895+ * Atomically adds @i to @v.
15896+ */
15897+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15898+{
15899+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15900 : "+m" (v->counter)
15901 : "ir" (i));
15902 }
15903@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15904 */
15905 static inline void atomic_sub(int i, atomic_t *v)
15906 {
15907- asm volatile(LOCK_PREFIX "subl %1,%0"
15908+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15909+
15910+#ifdef CONFIG_PAX_REFCOUNT
15911+ "jno 0f\n"
15912+ LOCK_PREFIX "addl %1,%0\n"
15913+ "int $4\n0:\n"
15914+ _ASM_EXTABLE(0b, 0b)
15915+#endif
15916+
15917+ : "+m" (v->counter)
15918+ : "ir" (i));
15919+}
15920+
15921+/**
15922+ * atomic_sub_unchecked - subtract integer from atomic variable
15923+ * @i: integer value to subtract
15924+ * @v: pointer of type atomic_unchecked_t
15925+ *
15926+ * Atomically subtracts @i from @v.
15927+ */
15928+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15929+{
15930+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15931 : "+m" (v->counter)
15932 : "ir" (i));
15933 }
15934@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15935 */
15936 static inline int atomic_sub_and_test(int i, atomic_t *v)
15937 {
15938- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15939+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15940 }
15941
15942 /**
15943@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15944 */
15945 static inline void atomic_inc(atomic_t *v)
15946 {
15947- asm volatile(LOCK_PREFIX "incl %0"
15948+ asm volatile(LOCK_PREFIX "incl %0\n"
15949+
15950+#ifdef CONFIG_PAX_REFCOUNT
15951+ "jno 0f\n"
15952+ LOCK_PREFIX "decl %0\n"
15953+ "int $4\n0:\n"
15954+ _ASM_EXTABLE(0b, 0b)
15955+#endif
15956+
15957+ : "+m" (v->counter));
15958+}
15959+
15960+/**
15961+ * atomic_inc_unchecked - increment atomic variable
15962+ * @v: pointer of type atomic_unchecked_t
15963+ *
15964+ * Atomically increments @v by 1.
15965+ */
15966+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15967+{
15968+ asm volatile(LOCK_PREFIX "incl %0\n"
15969 : "+m" (v->counter));
15970 }
15971
15972@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15973 */
15974 static inline void atomic_dec(atomic_t *v)
15975 {
15976- asm volatile(LOCK_PREFIX "decl %0"
15977+ asm volatile(LOCK_PREFIX "decl %0\n"
15978+
15979+#ifdef CONFIG_PAX_REFCOUNT
15980+ "jno 0f\n"
15981+ LOCK_PREFIX "incl %0\n"
15982+ "int $4\n0:\n"
15983+ _ASM_EXTABLE(0b, 0b)
15984+#endif
15985+
15986+ : "+m" (v->counter));
15987+}
15988+
15989+/**
15990+ * atomic_dec_unchecked - decrement atomic variable
15991+ * @v: pointer of type atomic_unchecked_t
15992+ *
15993+ * Atomically decrements @v by 1.
15994+ */
15995+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15996+{
15997+ asm volatile(LOCK_PREFIX "decl %0\n"
15998 : "+m" (v->counter));
15999 }
16000
16001@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
16002 */
16003 static inline int atomic_dec_and_test(atomic_t *v)
16004 {
16005- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
16006+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
16007 }
16008
16009 /**
16010@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
16011 */
16012 static inline int atomic_inc_and_test(atomic_t *v)
16013 {
16014- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
16015+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
16016+}
16017+
16018+/**
16019+ * atomic_inc_and_test_unchecked - increment and test
16020+ * @v: pointer of type atomic_unchecked_t
16021+ *
16022+ * Atomically increments @v by 1
16023+ * and returns true if the result is zero, or false for all
16024+ * other cases.
16025+ */
16026+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
16027+{
16028+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
16029 }
16030
16031 /**
16032@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
16033 */
16034 static inline int atomic_add_negative(int i, atomic_t *v)
16035 {
16036- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
16037+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
16038 }
16039
16040 /**
16041@@ -154,6 +274,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
16042 */
16043 static inline int atomic_add_return(int i, atomic_t *v)
16044 {
16045+ return i + xadd_check_overflow(&v->counter, i);
16046+}
16047+
16048+/**
16049+ * atomic_add_return_unchecked - add integer and return
16050+ * @i: integer value to add
16051+ * @v: pointer of type atomic_unchecked_t
16052+ *
16053+ * Atomically adds @i to @v and returns @i + @v
16054+ */
16055+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
16056+{
16057 return i + xadd(&v->counter, i);
16058 }
16059
16060@@ -170,9 +302,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
16061 }
16062
16063 #define atomic_inc_return(v) (atomic_add_return(1, v))
16064+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
16065+{
16066+ return atomic_add_return_unchecked(1, v);
16067+}
16068 #define atomic_dec_return(v) (atomic_sub_return(1, v))
16069
16070-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
16071+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
16072+{
16073+ return cmpxchg(&v->counter, old, new);
16074+}
16075+
16076+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
16077 {
16078 return cmpxchg(&v->counter, old, new);
16079 }
16080@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
16081 return xchg(&v->counter, new);
16082 }
16083
16084+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
16085+{
16086+ return xchg(&v->counter, new);
16087+}
16088+
16089 /**
16090 * __atomic_add_unless - add unless the number is already a given value
16091 * @v: pointer of type atomic_t
16092@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
16093 * Atomically adds @a to @v, so long as @v was not already @u.
16094 * Returns the old value of @v.
16095 */
16096-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16097+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
16098 {
16099- int c, old;
16100+ int c, old, new;
16101 c = atomic_read(v);
16102 for (;;) {
16103- if (unlikely(c == (u)))
16104+ if (unlikely(c == u))
16105 break;
16106- old = atomic_cmpxchg((v), c, c + (a));
16107+
16108+ asm volatile("addl %2,%0\n"
16109+
16110+#ifdef CONFIG_PAX_REFCOUNT
16111+ "jno 0f\n"
16112+ "subl %2,%0\n"
16113+ "int $4\n0:\n"
16114+ _ASM_EXTABLE(0b, 0b)
16115+#endif
16116+
16117+ : "=r" (new)
16118+ : "0" (c), "ir" (a));
16119+
16120+ old = atomic_cmpxchg(v, c, new);
16121 if (likely(old == c))
16122 break;
16123 c = old;
16124@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16125 }
16126
16127 /**
16128+ * atomic_inc_not_zero_hint - increment if not null
16129+ * @v: pointer of type atomic_t
16130+ * @hint: probable value of the atomic before the increment
16131+ *
16132+ * This version of atomic_inc_not_zero() gives a hint of probable
16133+ * value of the atomic. This helps processor to not read the memory
16134+ * before doing the atomic read/modify/write cycle, lowering
16135+ * number of bus transactions on some arches.
16136+ *
16137+ * Returns: 0 if increment was not done, 1 otherwise.
16138+ */
16139+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
16140+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
16141+{
16142+ int val, c = hint, new;
16143+
16144+ /* sanity test, should be removed by compiler if hint is a constant */
16145+ if (!hint)
16146+ return __atomic_add_unless(v, 1, 0);
16147+
16148+ do {
16149+ asm volatile("incl %0\n"
16150+
16151+#ifdef CONFIG_PAX_REFCOUNT
16152+ "jno 0f\n"
16153+ "decl %0\n"
16154+ "int $4\n0:\n"
16155+ _ASM_EXTABLE(0b, 0b)
16156+#endif
16157+
16158+ : "=r" (new)
16159+ : "0" (c));
16160+
16161+ val = atomic_cmpxchg(v, c, new);
16162+ if (val == c)
16163+ return 1;
16164+ c = val;
16165+ } while (c);
16166+
16167+ return 0;
16168+}
16169+
16170+/**
16171 * atomic_inc_short - increment of a short integer
16172 * @v: pointer to type int
16173 *
16174@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
16175 #endif
16176
16177 /* These are x86-specific, used by some header files */
16178-#define atomic_clear_mask(mask, addr) \
16179- asm volatile(LOCK_PREFIX "andl %0,%1" \
16180- : : "r" (~(mask)), "m" (*(addr)) : "memory")
16181+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
16182+{
16183+ asm volatile(LOCK_PREFIX "andl %1,%0"
16184+ : "+m" (v->counter)
16185+ : "r" (~(mask))
16186+ : "memory");
16187+}
16188
16189-#define atomic_set_mask(mask, addr) \
16190- asm volatile(LOCK_PREFIX "orl %0,%1" \
16191- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
16192- : "memory")
16193+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16194+{
16195+ asm volatile(LOCK_PREFIX "andl %1,%0"
16196+ : "+m" (v->counter)
16197+ : "r" (~(mask))
16198+ : "memory");
16199+}
16200+
16201+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
16202+{
16203+ asm volatile(LOCK_PREFIX "orl %1,%0"
16204+ : "+m" (v->counter)
16205+ : "r" (mask)
16206+ : "memory");
16207+}
16208+
16209+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16210+{
16211+ asm volatile(LOCK_PREFIX "orl %1,%0"
16212+ : "+m" (v->counter)
16213+ : "r" (mask)
16214+ : "memory");
16215+}
16216
16217 #ifdef CONFIG_X86_32
16218 # include <asm/atomic64_32.h>
16219diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
16220index b154de7..bf18a5a 100644
16221--- a/arch/x86/include/asm/atomic64_32.h
16222+++ b/arch/x86/include/asm/atomic64_32.h
16223@@ -12,6 +12,14 @@ typedef struct {
16224 u64 __aligned(8) counter;
16225 } atomic64_t;
16226
16227+#ifdef CONFIG_PAX_REFCOUNT
16228+typedef struct {
16229+ u64 __aligned(8) counter;
16230+} atomic64_unchecked_t;
16231+#else
16232+typedef atomic64_t atomic64_unchecked_t;
16233+#endif
16234+
16235 #define ATOMIC64_INIT(val) { (val) }
16236
16237 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
16238@@ -37,21 +45,31 @@ typedef struct {
16239 ATOMIC64_DECL_ONE(sym##_386)
16240
16241 ATOMIC64_DECL_ONE(add_386);
16242+ATOMIC64_DECL_ONE(add_unchecked_386);
16243 ATOMIC64_DECL_ONE(sub_386);
16244+ATOMIC64_DECL_ONE(sub_unchecked_386);
16245 ATOMIC64_DECL_ONE(inc_386);
16246+ATOMIC64_DECL_ONE(inc_unchecked_386);
16247 ATOMIC64_DECL_ONE(dec_386);
16248+ATOMIC64_DECL_ONE(dec_unchecked_386);
16249 #endif
16250
16251 #define alternative_atomic64(f, out, in...) \
16252 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
16253
16254 ATOMIC64_DECL(read);
16255+ATOMIC64_DECL(read_unchecked);
16256 ATOMIC64_DECL(set);
16257+ATOMIC64_DECL(set_unchecked);
16258 ATOMIC64_DECL(xchg);
16259 ATOMIC64_DECL(add_return);
16260+ATOMIC64_DECL(add_return_unchecked);
16261 ATOMIC64_DECL(sub_return);
16262+ATOMIC64_DECL(sub_return_unchecked);
16263 ATOMIC64_DECL(inc_return);
16264+ATOMIC64_DECL(inc_return_unchecked);
16265 ATOMIC64_DECL(dec_return);
16266+ATOMIC64_DECL(dec_return_unchecked);
16267 ATOMIC64_DECL(dec_if_positive);
16268 ATOMIC64_DECL(inc_not_zero);
16269 ATOMIC64_DECL(add_unless);
16270@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
16271 }
16272
16273 /**
16274+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
16275+ * @p: pointer to type atomic64_unchecked_t
16276+ * @o: expected value
16277+ * @n: new value
16278+ *
16279+ * Atomically sets @v to @n if it was equal to @o and returns
16280+ * the old value.
16281+ */
16282+
16283+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
16284+{
16285+ return cmpxchg64(&v->counter, o, n);
16286+}
16287+
16288+/**
16289 * atomic64_xchg - xchg atomic64 variable
16290 * @v: pointer to type atomic64_t
16291 * @n: value to assign
16292@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
16293 }
16294
16295 /**
16296+ * atomic64_set_unchecked - set atomic64 variable
16297+ * @v: pointer to type atomic64_unchecked_t
16298+ * @n: value to assign
16299+ *
16300+ * Atomically sets the value of @v to @n.
16301+ */
16302+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
16303+{
16304+ unsigned high = (unsigned)(i >> 32);
16305+ unsigned low = (unsigned)i;
16306+ alternative_atomic64(set, /* no output */,
16307+ "S" (v), "b" (low), "c" (high)
16308+ : "eax", "edx", "memory");
16309+}
16310+
16311+/**
16312 * atomic64_read - read atomic64 variable
16313 * @v: pointer to type atomic64_t
16314 *
16315@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
16316 }
16317
16318 /**
16319+ * atomic64_read_unchecked - read atomic64 variable
16320+ * @v: pointer to type atomic64_unchecked_t
16321+ *
16322+ * Atomically reads the value of @v and returns it.
16323+ */
16324+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
16325+{
16326+ long long r;
16327+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
16328+ return r;
16329+ }
16330+
16331+/**
16332 * atomic64_add_return - add and return
16333 * @i: integer value to add
16334 * @v: pointer to type atomic64_t
16335@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
16336 return i;
16337 }
16338
16339+/**
16340+ * atomic64_add_return_unchecked - add and return
16341+ * @i: integer value to add
16342+ * @v: pointer to type atomic64_unchecked_t
16343+ *
16344+ * Atomically adds @i to @v and returns @i + *@v
16345+ */
16346+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
16347+{
16348+ alternative_atomic64(add_return_unchecked,
16349+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16350+ ASM_NO_INPUT_CLOBBER("memory"));
16351+ return i;
16352+}
16353+
16354 /*
16355 * Other variants with different arithmetic operators:
16356 */
16357@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
16358 return a;
16359 }
16360
16361+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16362+{
16363+ long long a;
16364+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
16365+ "S" (v) : "memory", "ecx");
16366+ return a;
16367+}
16368+
16369 static inline long long atomic64_dec_return(atomic64_t *v)
16370 {
16371 long long a;
16372@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
16373 }
16374
16375 /**
16376+ * atomic64_add_unchecked - add integer to atomic64 variable
16377+ * @i: integer value to add
16378+ * @v: pointer to type atomic64_unchecked_t
16379+ *
16380+ * Atomically adds @i to @v.
16381+ */
16382+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
16383+{
16384+ __alternative_atomic64(add_unchecked, add_return_unchecked,
16385+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16386+ ASM_NO_INPUT_CLOBBER("memory"));
16387+ return i;
16388+}
16389+
16390+/**
16391 * atomic64_sub - subtract the atomic64 variable
16392 * @i: integer value to subtract
16393 * @v: pointer to type atomic64_t
16394diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
16395index 46e9052..ae45136 100644
16396--- a/arch/x86/include/asm/atomic64_64.h
16397+++ b/arch/x86/include/asm/atomic64_64.h
16398@@ -18,7 +18,19 @@
16399 */
16400 static inline long atomic64_read(const atomic64_t *v)
16401 {
16402- return (*(volatile long *)&(v)->counter);
16403+ return (*(volatile const long *)&(v)->counter);
16404+}
16405+
16406+/**
16407+ * atomic64_read_unchecked - read atomic64 variable
16408+ * @v: pointer of type atomic64_unchecked_t
16409+ *
16410+ * Atomically reads the value of @v.
16411+ * Doesn't imply a read memory barrier.
16412+ */
16413+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
16414+{
16415+ return (*(volatile const long *)&(v)->counter);
16416 }
16417
16418 /**
16419@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
16420 }
16421
16422 /**
16423+ * atomic64_set_unchecked - set atomic64 variable
16424+ * @v: pointer to type atomic64_unchecked_t
16425+ * @i: required value
16426+ *
16427+ * Atomically sets the value of @v to @i.
16428+ */
16429+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
16430+{
16431+ v->counter = i;
16432+}
16433+
16434+/**
16435 * atomic64_add - add integer to atomic64 variable
16436 * @i: integer value to add
16437 * @v: pointer to type atomic64_t
16438@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
16439 */
16440 static inline void atomic64_add(long i, atomic64_t *v)
16441 {
16442+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
16443+
16444+#ifdef CONFIG_PAX_REFCOUNT
16445+ "jno 0f\n"
16446+ LOCK_PREFIX "subq %1,%0\n"
16447+ "int $4\n0:\n"
16448+ _ASM_EXTABLE(0b, 0b)
16449+#endif
16450+
16451+ : "=m" (v->counter)
16452+ : "er" (i), "m" (v->counter));
16453+}
16454+
16455+/**
16456+ * atomic64_add_unchecked - add integer to atomic64 variable
16457+ * @i: integer value to add
16458+ * @v: pointer to type atomic64_unchecked_t
16459+ *
16460+ * Atomically adds @i to @v.
16461+ */
16462+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16463+{
16464 asm volatile(LOCK_PREFIX "addq %1,%0"
16465 : "=m" (v->counter)
16466 : "er" (i), "m" (v->counter));
16467@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16468 */
16469 static inline void atomic64_sub(long i, atomic64_t *v)
16470 {
16471- asm volatile(LOCK_PREFIX "subq %1,%0"
16472+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16473+
16474+#ifdef CONFIG_PAX_REFCOUNT
16475+ "jno 0f\n"
16476+ LOCK_PREFIX "addq %1,%0\n"
16477+ "int $4\n0:\n"
16478+ _ASM_EXTABLE(0b, 0b)
16479+#endif
16480+
16481+ : "=m" (v->counter)
16482+ : "er" (i), "m" (v->counter));
16483+}
16484+
16485+/**
16486+ * atomic64_sub_unchecked - subtract the atomic64 variable
16487+ * @i: integer value to subtract
16488+ * @v: pointer to type atomic64_unchecked_t
16489+ *
16490+ * Atomically subtracts @i from @v.
16491+ */
16492+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16493+{
16494+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16495 : "=m" (v->counter)
16496 : "er" (i), "m" (v->counter));
16497 }
16498@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16499 */
16500 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16501 {
16502- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16503+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16504 }
16505
16506 /**
16507@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16508 */
16509 static inline void atomic64_inc(atomic64_t *v)
16510 {
16511+ asm volatile(LOCK_PREFIX "incq %0\n"
16512+
16513+#ifdef CONFIG_PAX_REFCOUNT
16514+ "jno 0f\n"
16515+ LOCK_PREFIX "decq %0\n"
16516+ "int $4\n0:\n"
16517+ _ASM_EXTABLE(0b, 0b)
16518+#endif
16519+
16520+ : "=m" (v->counter)
16521+ : "m" (v->counter));
16522+}
16523+
16524+/**
16525+ * atomic64_inc_unchecked - increment atomic64 variable
16526+ * @v: pointer to type atomic64_unchecked_t
16527+ *
16528+ * Atomically increments @v by 1.
16529+ */
16530+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16531+{
16532 asm volatile(LOCK_PREFIX "incq %0"
16533 : "=m" (v->counter)
16534 : "m" (v->counter));
16535@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16536 */
16537 static inline void atomic64_dec(atomic64_t *v)
16538 {
16539- asm volatile(LOCK_PREFIX "decq %0"
16540+ asm volatile(LOCK_PREFIX "decq %0\n"
16541+
16542+#ifdef CONFIG_PAX_REFCOUNT
16543+ "jno 0f\n"
16544+ LOCK_PREFIX "incq %0\n"
16545+ "int $4\n0:\n"
16546+ _ASM_EXTABLE(0b, 0b)
16547+#endif
16548+
16549+ : "=m" (v->counter)
16550+ : "m" (v->counter));
16551+}
16552+
16553+/**
16554+ * atomic64_dec_unchecked - decrement atomic64 variable
16555+ * @v: pointer to type atomic64_t
16556+ *
16557+ * Atomically decrements @v by 1.
16558+ */
16559+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16560+{
16561+ asm volatile(LOCK_PREFIX "decq %0\n"
16562 : "=m" (v->counter)
16563 : "m" (v->counter));
16564 }
16565@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16566 */
16567 static inline int atomic64_dec_and_test(atomic64_t *v)
16568 {
16569- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16570+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16571 }
16572
16573 /**
16574@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16575 */
16576 static inline int atomic64_inc_and_test(atomic64_t *v)
16577 {
16578- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16579+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16580 }
16581
16582 /**
16583@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16584 */
16585 static inline int atomic64_add_negative(long i, atomic64_t *v)
16586 {
16587- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16588+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16589 }
16590
16591 /**
16592@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16593 */
16594 static inline long atomic64_add_return(long i, atomic64_t *v)
16595 {
16596+ return i + xadd_check_overflow(&v->counter, i);
16597+}
16598+
16599+/**
16600+ * atomic64_add_return_unchecked - add and return
16601+ * @i: integer value to add
16602+ * @v: pointer to type atomic64_unchecked_t
16603+ *
16604+ * Atomically adds @i to @v and returns @i + @v
16605+ */
16606+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16607+{
16608 return i + xadd(&v->counter, i);
16609 }
16610
16611@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16612 }
16613
16614 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16615+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16616+{
16617+ return atomic64_add_return_unchecked(1, v);
16618+}
16619 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16620
16621 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16622@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16623 return cmpxchg(&v->counter, old, new);
16624 }
16625
16626+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16627+{
16628+ return cmpxchg(&v->counter, old, new);
16629+}
16630+
16631 static inline long atomic64_xchg(atomic64_t *v, long new)
16632 {
16633 return xchg(&v->counter, new);
16634@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16635 */
16636 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16637 {
16638- long c, old;
16639+ long c, old, new;
16640 c = atomic64_read(v);
16641 for (;;) {
16642- if (unlikely(c == (u)))
16643+ if (unlikely(c == u))
16644 break;
16645- old = atomic64_cmpxchg((v), c, c + (a));
16646+
16647+ asm volatile("add %2,%0\n"
16648+
16649+#ifdef CONFIG_PAX_REFCOUNT
16650+ "jno 0f\n"
16651+ "sub %2,%0\n"
16652+ "int $4\n0:\n"
16653+ _ASM_EXTABLE(0b, 0b)
16654+#endif
16655+
16656+ : "=r" (new)
16657+ : "0" (c), "ir" (a));
16658+
16659+ old = atomic64_cmpxchg(v, c, new);
16660 if (likely(old == c))
16661 break;
16662 c = old;
16663 }
16664- return c != (u);
16665+ return c != u;
16666 }
16667
16668 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16669diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16670index 0f4460b..fa1ee19 100644
16671--- a/arch/x86/include/asm/barrier.h
16672+++ b/arch/x86/include/asm/barrier.h
16673@@ -107,7 +107,7 @@
16674 do { \
16675 compiletime_assert_atomic_type(*p); \
16676 smp_mb(); \
16677- ACCESS_ONCE(*p) = (v); \
16678+ ACCESS_ONCE_RW(*p) = (v); \
16679 } while (0)
16680
16681 #define smp_load_acquire(p) \
16682@@ -124,7 +124,7 @@ do { \
16683 do { \
16684 compiletime_assert_atomic_type(*p); \
16685 barrier(); \
16686- ACCESS_ONCE(*p) = (v); \
16687+ ACCESS_ONCE_RW(*p) = (v); \
16688 } while (0)
16689
16690 #define smp_load_acquire(p) \
16691diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16692index cfe3b95..d01b118 100644
16693--- a/arch/x86/include/asm/bitops.h
16694+++ b/arch/x86/include/asm/bitops.h
16695@@ -50,7 +50,7 @@
16696 * a mask operation on a byte.
16697 */
16698 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16699-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16700+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16701 #define CONST_MASK(nr) (1 << ((nr) & 7))
16702
16703 /**
16704@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16705 */
16706 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16707 {
16708- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16709+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16710 }
16711
16712 /**
16713@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16714 */
16715 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16716 {
16717- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16718+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16719 }
16720
16721 /**
16722@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16723 */
16724 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16725 {
16726- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16727+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16728 }
16729
16730 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16731@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16732 *
16733 * Undefined if no bit exists, so code should check against 0 first.
16734 */
16735-static inline unsigned long __ffs(unsigned long word)
16736+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16737 {
16738 asm("rep; bsf %1,%0"
16739 : "=r" (word)
16740@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16741 *
16742 * Undefined if no zero exists, so code should check against ~0UL first.
16743 */
16744-static inline unsigned long ffz(unsigned long word)
16745+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16746 {
16747 asm("rep; bsf %1,%0"
16748 : "=r" (word)
16749@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16750 *
16751 * Undefined if no set bit exists, so code should check against 0 first.
16752 */
16753-static inline unsigned long __fls(unsigned long word)
16754+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16755 {
16756 asm("bsr %1,%0"
16757 : "=r" (word)
16758@@ -434,7 +434,7 @@ static inline int ffs(int x)
16759 * set bit if value is nonzero. The last (most significant) bit is
16760 * at position 32.
16761 */
16762-static inline int fls(int x)
16763+static inline int __intentional_overflow(-1) fls(int x)
16764 {
16765 int r;
16766
16767@@ -476,7 +476,7 @@ static inline int fls(int x)
16768 * at position 64.
16769 */
16770 #ifdef CONFIG_X86_64
16771-static __always_inline int fls64(__u64 x)
16772+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16773 {
16774 int bitpos = -1;
16775 /*
16776diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16777index 4fa687a..60f2d39 100644
16778--- a/arch/x86/include/asm/boot.h
16779+++ b/arch/x86/include/asm/boot.h
16780@@ -6,10 +6,15 @@
16781 #include <uapi/asm/boot.h>
16782
16783 /* Physical address where kernel should be loaded. */
16784-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16785+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16786 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16787 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16788
16789+#ifndef __ASSEMBLY__
16790+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16791+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16792+#endif
16793+
16794 /* Minimum kernel alignment, as a power of two */
16795 #ifdef CONFIG_X86_64
16796 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16797diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16798index 48f99f1..d78ebf9 100644
16799--- a/arch/x86/include/asm/cache.h
16800+++ b/arch/x86/include/asm/cache.h
16801@@ -5,12 +5,13 @@
16802
16803 /* L1 cache line size */
16804 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16805-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16806+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16807
16808 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16809+#define __read_only __attribute__((__section__(".data..read_only")))
16810
16811 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16812-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16813+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16814
16815 #ifdef CONFIG_X86_VSMP
16816 #ifdef CONFIG_SMP
16817diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16818index 9863ee3..4a1f8e1 100644
16819--- a/arch/x86/include/asm/cacheflush.h
16820+++ b/arch/x86/include/asm/cacheflush.h
16821@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16822 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16823
16824 if (pg_flags == _PGMT_DEFAULT)
16825- return -1;
16826+ return ~0UL;
16827 else if (pg_flags == _PGMT_WC)
16828 return _PAGE_CACHE_WC;
16829 else if (pg_flags == _PGMT_UC_MINUS)
16830diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16831index cb4c73b..c473c29 100644
16832--- a/arch/x86/include/asm/calling.h
16833+++ b/arch/x86/include/asm/calling.h
16834@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16835 #define RSP 152
16836 #define SS 160
16837
16838-#define ARGOFFSET R11
16839-#define SWFRAME ORIG_RAX
16840+#define ARGOFFSET R15
16841
16842 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16843- subq $9*8+\addskip, %rsp
16844- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16845- movq_cfi rdi, 8*8
16846- movq_cfi rsi, 7*8
16847- movq_cfi rdx, 6*8
16848+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16849+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16850+ movq_cfi rdi, RDI
16851+ movq_cfi rsi, RSI
16852+ movq_cfi rdx, RDX
16853
16854 .if \save_rcx
16855- movq_cfi rcx, 5*8
16856+ movq_cfi rcx, RCX
16857 .endif
16858
16859- movq_cfi rax, 4*8
16860+ movq_cfi rax, RAX
16861
16862 .if \save_r891011
16863- movq_cfi r8, 3*8
16864- movq_cfi r9, 2*8
16865- movq_cfi r10, 1*8
16866- movq_cfi r11, 0*8
16867+ movq_cfi r8, R8
16868+ movq_cfi r9, R9
16869+ movq_cfi r10, R10
16870+ movq_cfi r11, R11
16871 .endif
16872
16873+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16874+ movq_cfi r12, R12
16875+#endif
16876+
16877 .endm
16878
16879-#define ARG_SKIP (9*8)
16880+#define ARG_SKIP ORIG_RAX
16881
16882 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16883 rstor_r8910=1, rstor_rdx=1
16884+
16885+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16886+ movq_cfi_restore R12, r12
16887+#endif
16888+
16889 .if \rstor_r11
16890- movq_cfi_restore 0*8, r11
16891+ movq_cfi_restore R11, r11
16892 .endif
16893
16894 .if \rstor_r8910
16895- movq_cfi_restore 1*8, r10
16896- movq_cfi_restore 2*8, r9
16897- movq_cfi_restore 3*8, r8
16898+ movq_cfi_restore R10, r10
16899+ movq_cfi_restore R9, r9
16900+ movq_cfi_restore R8, r8
16901 .endif
16902
16903 .if \rstor_rax
16904- movq_cfi_restore 4*8, rax
16905+ movq_cfi_restore RAX, rax
16906 .endif
16907
16908 .if \rstor_rcx
16909- movq_cfi_restore 5*8, rcx
16910+ movq_cfi_restore RCX, rcx
16911 .endif
16912
16913 .if \rstor_rdx
16914- movq_cfi_restore 6*8, rdx
16915+ movq_cfi_restore RDX, rdx
16916 .endif
16917
16918- movq_cfi_restore 7*8, rsi
16919- movq_cfi_restore 8*8, rdi
16920+ movq_cfi_restore RSI, rsi
16921+ movq_cfi_restore RDI, rdi
16922
16923- .if ARG_SKIP+\addskip > 0
16924- addq $ARG_SKIP+\addskip, %rsp
16925- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16926+ .if ORIG_RAX+\addskip > 0
16927+ addq $ORIG_RAX+\addskip, %rsp
16928+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16929 .endif
16930 .endm
16931
16932- .macro LOAD_ARGS offset, skiprax=0
16933- movq \offset(%rsp), %r11
16934- movq \offset+8(%rsp), %r10
16935- movq \offset+16(%rsp), %r9
16936- movq \offset+24(%rsp), %r8
16937- movq \offset+40(%rsp), %rcx
16938- movq \offset+48(%rsp), %rdx
16939- movq \offset+56(%rsp), %rsi
16940- movq \offset+64(%rsp), %rdi
16941+ .macro LOAD_ARGS skiprax=0
16942+ movq R11(%rsp), %r11
16943+ movq R10(%rsp), %r10
16944+ movq R9(%rsp), %r9
16945+ movq R8(%rsp), %r8
16946+ movq RCX(%rsp), %rcx
16947+ movq RDX(%rsp), %rdx
16948+ movq RSI(%rsp), %rsi
16949+ movq RDI(%rsp), %rdi
16950 .if \skiprax
16951 .else
16952- movq \offset+72(%rsp), %rax
16953+ movq RAX(%rsp), %rax
16954 .endif
16955 .endm
16956
16957-#define REST_SKIP (6*8)
16958-
16959 .macro SAVE_REST
16960- subq $REST_SKIP, %rsp
16961- CFI_ADJUST_CFA_OFFSET REST_SKIP
16962- movq_cfi rbx, 5*8
16963- movq_cfi rbp, 4*8
16964- movq_cfi r12, 3*8
16965- movq_cfi r13, 2*8
16966- movq_cfi r14, 1*8
16967- movq_cfi r15, 0*8
16968+ movq_cfi rbx, RBX
16969+ movq_cfi rbp, RBP
16970+
16971+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16972+ movq_cfi r12, R12
16973+#endif
16974+
16975+ movq_cfi r13, R13
16976+ movq_cfi r14, R14
16977+ movq_cfi r15, R15
16978 .endm
16979
16980 .macro RESTORE_REST
16981- movq_cfi_restore 0*8, r15
16982- movq_cfi_restore 1*8, r14
16983- movq_cfi_restore 2*8, r13
16984- movq_cfi_restore 3*8, r12
16985- movq_cfi_restore 4*8, rbp
16986- movq_cfi_restore 5*8, rbx
16987- addq $REST_SKIP, %rsp
16988- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16989+ movq_cfi_restore R15, r15
16990+ movq_cfi_restore R14, r14
16991+ movq_cfi_restore R13, r13
16992+
16993+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16994+ movq_cfi_restore R12, r12
16995+#endif
16996+
16997+ movq_cfi_restore RBP, rbp
16998+ movq_cfi_restore RBX, rbx
16999 .endm
17000
17001 .macro SAVE_ALL
17002diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
17003index f50de69..2b0a458 100644
17004--- a/arch/x86/include/asm/checksum_32.h
17005+++ b/arch/x86/include/asm/checksum_32.h
17006@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
17007 int len, __wsum sum,
17008 int *src_err_ptr, int *dst_err_ptr);
17009
17010+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
17011+ int len, __wsum sum,
17012+ int *src_err_ptr, int *dst_err_ptr);
17013+
17014+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
17015+ int len, __wsum sum,
17016+ int *src_err_ptr, int *dst_err_ptr);
17017+
17018 /*
17019 * Note: when you get a NULL pointer exception here this means someone
17020 * passed in an incorrect kernel address to one of these functions.
17021@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
17022
17023 might_sleep();
17024 stac();
17025- ret = csum_partial_copy_generic((__force void *)src, dst,
17026+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
17027 len, sum, err_ptr, NULL);
17028 clac();
17029
17030@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
17031 might_sleep();
17032 if (access_ok(VERIFY_WRITE, dst, len)) {
17033 stac();
17034- ret = csum_partial_copy_generic(src, (__force void *)dst,
17035+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
17036 len, sum, NULL, err_ptr);
17037 clac();
17038 return ret;
17039diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
17040index 99c105d7..2f667ac 100644
17041--- a/arch/x86/include/asm/cmpxchg.h
17042+++ b/arch/x86/include/asm/cmpxchg.h
17043@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
17044 __compiletime_error("Bad argument size for cmpxchg");
17045 extern void __xadd_wrong_size(void)
17046 __compiletime_error("Bad argument size for xadd");
17047+extern void __xadd_check_overflow_wrong_size(void)
17048+ __compiletime_error("Bad argument size for xadd_check_overflow");
17049 extern void __add_wrong_size(void)
17050 __compiletime_error("Bad argument size for add");
17051+extern void __add_check_overflow_wrong_size(void)
17052+ __compiletime_error("Bad argument size for add_check_overflow");
17053
17054 /*
17055 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
17056@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
17057 __ret; \
17058 })
17059
17060+#ifdef CONFIG_PAX_REFCOUNT
17061+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
17062+ ({ \
17063+ __typeof__ (*(ptr)) __ret = (arg); \
17064+ switch (sizeof(*(ptr))) { \
17065+ case __X86_CASE_L: \
17066+ asm volatile (lock #op "l %0, %1\n" \
17067+ "jno 0f\n" \
17068+ "mov %0,%1\n" \
17069+ "int $4\n0:\n" \
17070+ _ASM_EXTABLE(0b, 0b) \
17071+ : "+r" (__ret), "+m" (*(ptr)) \
17072+ : : "memory", "cc"); \
17073+ break; \
17074+ case __X86_CASE_Q: \
17075+ asm volatile (lock #op "q %q0, %1\n" \
17076+ "jno 0f\n" \
17077+ "mov %0,%1\n" \
17078+ "int $4\n0:\n" \
17079+ _ASM_EXTABLE(0b, 0b) \
17080+ : "+r" (__ret), "+m" (*(ptr)) \
17081+ : : "memory", "cc"); \
17082+ break; \
17083+ default: \
17084+ __ ## op ## _check_overflow_wrong_size(); \
17085+ } \
17086+ __ret; \
17087+ })
17088+#else
17089+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
17090+#endif
17091+
17092 /*
17093 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
17094 * Since this is generally used to protect other memory information, we
17095@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
17096 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
17097 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
17098
17099+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
17100+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
17101+
17102 #define __add(ptr, inc, lock) \
17103 ({ \
17104 __typeof__ (*(ptr)) __ret = (inc); \
17105diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
17106index 59c6c40..5e0b22c 100644
17107--- a/arch/x86/include/asm/compat.h
17108+++ b/arch/x86/include/asm/compat.h
17109@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
17110 typedef u32 compat_uint_t;
17111 typedef u32 compat_ulong_t;
17112 typedef u64 __attribute__((aligned(4))) compat_u64;
17113-typedef u32 compat_uptr_t;
17114+typedef u32 __user compat_uptr_t;
17115
17116 struct compat_timespec {
17117 compat_time_t tv_sec;
17118diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
17119index bb9b258..5fad1bf 100644
17120--- a/arch/x86/include/asm/cpufeature.h
17121+++ b/arch/x86/include/asm/cpufeature.h
17122@@ -203,14 +203,14 @@
17123 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
17124 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
17125
17126-
17127+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
17128 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
17129 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
17130 #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
17131 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17132 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17133 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17134-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17135+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
17136 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17137 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
17138 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17139@@ -370,6 +370,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
17140 #undef cpu_has_centaur_mcr
17141 #define cpu_has_centaur_mcr 0
17142
17143+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
17144 #endif /* CONFIG_X86_64 */
17145
17146 #if __GNUC__ >= 4
17147@@ -422,7 +423,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17148
17149 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
17150 t_warn:
17151- warn_pre_alternatives();
17152+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
17153+ warn_pre_alternatives();
17154 return false;
17155 #endif
17156
17157@@ -442,7 +444,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17158 ".section .discard,\"aw\",@progbits\n"
17159 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17160 ".previous\n"
17161- ".section .altinstr_replacement,\"ax\"\n"
17162+ ".section .altinstr_replacement,\"a\"\n"
17163 "3: movb $1,%0\n"
17164 "4:\n"
17165 ".previous\n"
17166@@ -479,7 +481,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17167 " .byte 2b - 1b\n" /* src len */
17168 " .byte 4f - 3f\n" /* repl len */
17169 ".previous\n"
17170- ".section .altinstr_replacement,\"ax\"\n"
17171+ ".section .altinstr_replacement,\"a\"\n"
17172 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
17173 "4:\n"
17174 ".previous\n"
17175@@ -512,7 +514,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17176 ".section .discard,\"aw\",@progbits\n"
17177 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17178 ".previous\n"
17179- ".section .altinstr_replacement,\"ax\"\n"
17180+ ".section .altinstr_replacement,\"a\"\n"
17181 "3: movb $0,%0\n"
17182 "4:\n"
17183 ".previous\n"
17184@@ -526,7 +528,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17185 ".section .discard,\"aw\",@progbits\n"
17186 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
17187 ".previous\n"
17188- ".section .altinstr_replacement,\"ax\"\n"
17189+ ".section .altinstr_replacement,\"a\"\n"
17190 "5: movb $1,%0\n"
17191 "6:\n"
17192 ".previous\n"
17193diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
17194index 50d033a..37deb26 100644
17195--- a/arch/x86/include/asm/desc.h
17196+++ b/arch/x86/include/asm/desc.h
17197@@ -4,6 +4,7 @@
17198 #include <asm/desc_defs.h>
17199 #include <asm/ldt.h>
17200 #include <asm/mmu.h>
17201+#include <asm/pgtable.h>
17202
17203 #include <linux/smp.h>
17204 #include <linux/percpu.h>
17205@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17206
17207 desc->type = (info->read_exec_only ^ 1) << 1;
17208 desc->type |= info->contents << 2;
17209+ desc->type |= info->seg_not_present ^ 1;
17210
17211 desc->s = 1;
17212 desc->dpl = 0x3;
17213@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17214 }
17215
17216 extern struct desc_ptr idt_descr;
17217-extern gate_desc idt_table[];
17218-extern struct desc_ptr debug_idt_descr;
17219-extern gate_desc debug_idt_table[];
17220-
17221-struct gdt_page {
17222- struct desc_struct gdt[GDT_ENTRIES];
17223-} __attribute__((aligned(PAGE_SIZE)));
17224-
17225-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
17226+extern gate_desc idt_table[IDT_ENTRIES];
17227+extern const struct desc_ptr debug_idt_descr;
17228+extern gate_desc debug_idt_table[IDT_ENTRIES];
17229
17230+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
17231 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
17232 {
17233- return per_cpu(gdt_page, cpu).gdt;
17234+ return cpu_gdt_table[cpu];
17235 }
17236
17237 #ifdef CONFIG_X86_64
17238@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
17239 unsigned long base, unsigned dpl, unsigned flags,
17240 unsigned short seg)
17241 {
17242- gate->a = (seg << 16) | (base & 0xffff);
17243- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
17244+ gate->gate.offset_low = base;
17245+ gate->gate.seg = seg;
17246+ gate->gate.reserved = 0;
17247+ gate->gate.type = type;
17248+ gate->gate.s = 0;
17249+ gate->gate.dpl = dpl;
17250+ gate->gate.p = 1;
17251+ gate->gate.offset_high = base >> 16;
17252 }
17253
17254 #endif
17255@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
17256
17257 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
17258 {
17259+ pax_open_kernel();
17260 memcpy(&idt[entry], gate, sizeof(*gate));
17261+ pax_close_kernel();
17262 }
17263
17264 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
17265 {
17266+ pax_open_kernel();
17267 memcpy(&ldt[entry], desc, 8);
17268+ pax_close_kernel();
17269 }
17270
17271 static inline void
17272@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
17273 default: size = sizeof(*gdt); break;
17274 }
17275
17276+ pax_open_kernel();
17277 memcpy(&gdt[entry], desc, size);
17278+ pax_close_kernel();
17279 }
17280
17281 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
17282@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
17283
17284 static inline void native_load_tr_desc(void)
17285 {
17286+ pax_open_kernel();
17287 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
17288+ pax_close_kernel();
17289 }
17290
17291 static inline void native_load_gdt(const struct desc_ptr *dtr)
17292@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
17293 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
17294 unsigned int i;
17295
17296+ pax_open_kernel();
17297 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
17298 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
17299+ pax_close_kernel();
17300 }
17301
17302 #define _LDT_empty(info) \
17303@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
17304 preempt_enable();
17305 }
17306
17307-static inline unsigned long get_desc_base(const struct desc_struct *desc)
17308+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
17309 {
17310 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
17311 }
17312@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
17313 }
17314
17315 #ifdef CONFIG_X86_64
17316-static inline void set_nmi_gate(int gate, void *addr)
17317+static inline void set_nmi_gate(int gate, const void *addr)
17318 {
17319 gate_desc s;
17320
17321@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
17322 #endif
17323
17324 #ifdef CONFIG_TRACING
17325-extern struct desc_ptr trace_idt_descr;
17326-extern gate_desc trace_idt_table[];
17327+extern const struct desc_ptr trace_idt_descr;
17328+extern gate_desc trace_idt_table[IDT_ENTRIES];
17329 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17330 {
17331 write_idt_entry(trace_idt_table, entry, gate);
17332 }
17333
17334-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
17335+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
17336 unsigned dpl, unsigned ist, unsigned seg)
17337 {
17338 gate_desc s;
17339@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17340 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
17341 #endif
17342
17343-static inline void _set_gate(int gate, unsigned type, void *addr,
17344+static inline void _set_gate(int gate, unsigned type, const void *addr,
17345 unsigned dpl, unsigned ist, unsigned seg)
17346 {
17347 gate_desc s;
17348@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
17349 #define set_intr_gate(n, addr) \
17350 do { \
17351 BUG_ON((unsigned)n > 0xFF); \
17352- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
17353+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
17354 __KERNEL_CS); \
17355- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
17356+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
17357 0, 0, __KERNEL_CS); \
17358 } while (0)
17359
17360@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
17361 /*
17362 * This routine sets up an interrupt gate at directory privilege level 3.
17363 */
17364-static inline void set_system_intr_gate(unsigned int n, void *addr)
17365+static inline void set_system_intr_gate(unsigned int n, const void *addr)
17366 {
17367 BUG_ON((unsigned)n > 0xFF);
17368 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
17369 }
17370
17371-static inline void set_system_trap_gate(unsigned int n, void *addr)
17372+static inline void set_system_trap_gate(unsigned int n, const void *addr)
17373 {
17374 BUG_ON((unsigned)n > 0xFF);
17375 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
17376 }
17377
17378-static inline void set_trap_gate(unsigned int n, void *addr)
17379+static inline void set_trap_gate(unsigned int n, const void *addr)
17380 {
17381 BUG_ON((unsigned)n > 0xFF);
17382 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
17383@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
17384 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
17385 {
17386 BUG_ON((unsigned)n > 0xFF);
17387- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
17388+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
17389 }
17390
17391-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
17392+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
17393 {
17394 BUG_ON((unsigned)n > 0xFF);
17395 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
17396 }
17397
17398-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
17399+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
17400 {
17401 BUG_ON((unsigned)n > 0xFF);
17402 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
17403@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
17404 else
17405 load_idt((const struct desc_ptr *)&idt_descr);
17406 }
17407+
17408+#ifdef CONFIG_X86_32
17409+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
17410+{
17411+ struct desc_struct d;
17412+
17413+ if (likely(limit))
17414+ limit = (limit - 1UL) >> PAGE_SHIFT;
17415+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
17416+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
17417+}
17418+#endif
17419+
17420 #endif /* _ASM_X86_DESC_H */
17421diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
17422index 278441f..b95a174 100644
17423--- a/arch/x86/include/asm/desc_defs.h
17424+++ b/arch/x86/include/asm/desc_defs.h
17425@@ -31,6 +31,12 @@ struct desc_struct {
17426 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
17427 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
17428 };
17429+ struct {
17430+ u16 offset_low;
17431+ u16 seg;
17432+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
17433+ unsigned offset_high: 16;
17434+ } gate;
17435 };
17436 } __attribute__((packed));
17437
17438diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
17439index ced283a..ffe04cc 100644
17440--- a/arch/x86/include/asm/div64.h
17441+++ b/arch/x86/include/asm/div64.h
17442@@ -39,7 +39,7 @@
17443 __mod; \
17444 })
17445
17446-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17447+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17448 {
17449 union {
17450 u64 v64;
17451diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
17452index 1a055c8..1a5082a 100644
17453--- a/arch/x86/include/asm/elf.h
17454+++ b/arch/x86/include/asm/elf.h
17455@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17456
17457 #include <asm/vdso.h>
17458
17459-#ifdef CONFIG_X86_64
17460-extern unsigned int vdso64_enabled;
17461-#endif
17462 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17463 extern unsigned int vdso32_enabled;
17464 #endif
17465@@ -160,8 +157,9 @@ do { \
17466 #define elf_check_arch(x) \
17467 ((x)->e_machine == EM_X86_64)
17468
17469-#define compat_elf_check_arch(x) \
17470- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
17471+#define compat_elf_check_arch(x) \
17472+ (elf_check_arch_ia32(x) || \
17473+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
17474
17475 #if __USER32_DS != __USER_DS
17476 # error "The following code assumes __USER32_DS == __USER_DS"
17477@@ -248,7 +246,25 @@ extern int force_personality32;
17478 the loader. We need to make sure that it is out of the way of the program
17479 that it will "exec", and that there is sufficient room for the brk. */
17480
17481+#ifdef CONFIG_PAX_SEGMEXEC
17482+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17483+#else
17484 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17485+#endif
17486+
17487+#ifdef CONFIG_PAX_ASLR
17488+#ifdef CONFIG_X86_32
17489+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17490+
17491+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17492+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17493+#else
17494+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17495+
17496+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17497+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17498+#endif
17499+#endif
17500
17501 /* This yields a mask that user programs can use to figure out what
17502 instruction set this CPU supports. This could be done in user space,
17503@@ -297,17 +313,13 @@ do { \
17504
17505 #define ARCH_DLINFO \
17506 do { \
17507- if (vdso64_enabled) \
17508- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17509- (unsigned long __force)current->mm->context.vdso); \
17510+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17511 } while (0)
17512
17513 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17514 #define ARCH_DLINFO_X32 \
17515 do { \
17516- if (vdso64_enabled) \
17517- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17518- (unsigned long __force)current->mm->context.vdso); \
17519+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17520 } while (0)
17521
17522 #define AT_SYSINFO 32
17523@@ -322,10 +334,10 @@ else \
17524
17525 #endif /* !CONFIG_X86_32 */
17526
17527-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17528+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17529
17530 #define VDSO_ENTRY \
17531- ((unsigned long)current->mm->context.vdso + \
17532+ (current->mm->context.vdso + \
17533 selected_vdso32->sym___kernel_vsyscall)
17534
17535 struct linux_binprm;
17536@@ -337,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17537 int uses_interp);
17538 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17539
17540-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17541-#define arch_randomize_brk arch_randomize_brk
17542-
17543 /*
17544 * True on X86_32 or when emulating IA32 on X86_64
17545 */
17546diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17547index 77a99ac..39ff7f5 100644
17548--- a/arch/x86/include/asm/emergency-restart.h
17549+++ b/arch/x86/include/asm/emergency-restart.h
17550@@ -1,6 +1,6 @@
17551 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17552 #define _ASM_X86_EMERGENCY_RESTART_H
17553
17554-extern void machine_emergency_restart(void);
17555+extern void machine_emergency_restart(void) __noreturn;
17556
17557 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17558diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17559index 1c7eefe..d0e4702 100644
17560--- a/arch/x86/include/asm/floppy.h
17561+++ b/arch/x86/include/asm/floppy.h
17562@@ -229,18 +229,18 @@ static struct fd_routine_l {
17563 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17564 } fd_routine[] = {
17565 {
17566- request_dma,
17567- free_dma,
17568- get_dma_residue,
17569- dma_mem_alloc,
17570- hard_dma_setup
17571+ ._request_dma = request_dma,
17572+ ._free_dma = free_dma,
17573+ ._get_dma_residue = get_dma_residue,
17574+ ._dma_mem_alloc = dma_mem_alloc,
17575+ ._dma_setup = hard_dma_setup
17576 },
17577 {
17578- vdma_request_dma,
17579- vdma_nop,
17580- vdma_get_dma_residue,
17581- vdma_mem_alloc,
17582- vdma_dma_setup
17583+ ._request_dma = vdma_request_dma,
17584+ ._free_dma = vdma_nop,
17585+ ._get_dma_residue = vdma_get_dma_residue,
17586+ ._dma_mem_alloc = vdma_mem_alloc,
17587+ ._dma_setup = vdma_dma_setup
17588 }
17589 };
17590
17591diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17592index 412ecec..c1ea43a 100644
17593--- a/arch/x86/include/asm/fpu-internal.h
17594+++ b/arch/x86/include/asm/fpu-internal.h
17595@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17596 #define user_insn(insn, output, input...) \
17597 ({ \
17598 int err; \
17599+ pax_open_userland(); \
17600 asm volatile(ASM_STAC "\n" \
17601- "1:" #insn "\n\t" \
17602+ "1:" \
17603+ __copyuser_seg \
17604+ #insn "\n\t" \
17605 "2: " ASM_CLAC "\n" \
17606 ".section .fixup,\"ax\"\n" \
17607 "3: movl $-1,%[err]\n" \
17608@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17609 _ASM_EXTABLE(1b, 3b) \
17610 : [err] "=r" (err), output \
17611 : "0"(0), input); \
17612+ pax_close_userland(); \
17613 err; \
17614 })
17615
17616@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17617 "fnclex\n\t"
17618 "emms\n\t"
17619 "fildl %P[addr]" /* set F?P to defined value */
17620- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17621+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17622 }
17623
17624 return fpu_restore_checking(&tsk->thread.fpu);
17625diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17626index b4c1f54..e290c08 100644
17627--- a/arch/x86/include/asm/futex.h
17628+++ b/arch/x86/include/asm/futex.h
17629@@ -12,6 +12,7 @@
17630 #include <asm/smap.h>
17631
17632 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17633+ typecheck(u32 __user *, uaddr); \
17634 asm volatile("\t" ASM_STAC "\n" \
17635 "1:\t" insn "\n" \
17636 "2:\t" ASM_CLAC "\n" \
17637@@ -20,15 +21,16 @@
17638 "\tjmp\t2b\n" \
17639 "\t.previous\n" \
17640 _ASM_EXTABLE(1b, 3b) \
17641- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17642+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17643 : "i" (-EFAULT), "0" (oparg), "1" (0))
17644
17645 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17646+ typecheck(u32 __user *, uaddr); \
17647 asm volatile("\t" ASM_STAC "\n" \
17648 "1:\tmovl %2, %0\n" \
17649 "\tmovl\t%0, %3\n" \
17650 "\t" insn "\n" \
17651- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17652+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17653 "\tjnz\t1b\n" \
17654 "3:\t" ASM_CLAC "\n" \
17655 "\t.section .fixup,\"ax\"\n" \
17656@@ -38,7 +40,7 @@
17657 _ASM_EXTABLE(1b, 4b) \
17658 _ASM_EXTABLE(2b, 4b) \
17659 : "=&a" (oldval), "=&r" (ret), \
17660- "+m" (*uaddr), "=&r" (tem) \
17661+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17662 : "r" (oparg), "i" (-EFAULT), "1" (0))
17663
17664 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17665@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17666
17667 pagefault_disable();
17668
17669+ pax_open_userland();
17670 switch (op) {
17671 case FUTEX_OP_SET:
17672- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17673+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17674 break;
17675 case FUTEX_OP_ADD:
17676- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17677+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17678 uaddr, oparg);
17679 break;
17680 case FUTEX_OP_OR:
17681@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17682 default:
17683 ret = -ENOSYS;
17684 }
17685+ pax_close_userland();
17686
17687 pagefault_enable();
17688
17689diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17690index 4615906..788c817 100644
17691--- a/arch/x86/include/asm/hw_irq.h
17692+++ b/arch/x86/include/asm/hw_irq.h
17693@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17694 extern void enable_IO_APIC(void);
17695
17696 /* Statistics */
17697-extern atomic_t irq_err_count;
17698-extern atomic_t irq_mis_count;
17699+extern atomic_unchecked_t irq_err_count;
17700+extern atomic_unchecked_t irq_mis_count;
17701
17702 /* EISA */
17703 extern void eisa_set_level_irq(unsigned int irq);
17704diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17705index ccffa53..3c90c87 100644
17706--- a/arch/x86/include/asm/i8259.h
17707+++ b/arch/x86/include/asm/i8259.h
17708@@ -62,7 +62,7 @@ struct legacy_pic {
17709 void (*init)(int auto_eoi);
17710 int (*irq_pending)(unsigned int irq);
17711 void (*make_irq)(unsigned int irq);
17712-};
17713+} __do_const;
17714
17715 extern struct legacy_pic *legacy_pic;
17716 extern struct legacy_pic null_legacy_pic;
17717diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17718index b8237d8..3e8864e 100644
17719--- a/arch/x86/include/asm/io.h
17720+++ b/arch/x86/include/asm/io.h
17721@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17722 "m" (*(volatile type __force *)addr) barrier); }
17723
17724 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17725-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17726-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17727+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17728+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17729
17730 build_mmio_read(__readb, "b", unsigned char, "=q", )
17731-build_mmio_read(__readw, "w", unsigned short, "=r", )
17732-build_mmio_read(__readl, "l", unsigned int, "=r", )
17733+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17734+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17735
17736 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17737 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17738@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17739 * this function
17740 */
17741
17742-static inline phys_addr_t virt_to_phys(volatile void *address)
17743+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17744 {
17745 return __pa(address);
17746 }
17747@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17748 return ioremap_nocache(offset, size);
17749 }
17750
17751-extern void iounmap(volatile void __iomem *addr);
17752+extern void iounmap(const volatile void __iomem *addr);
17753
17754 extern void set_iounmap_nonlazy(void);
17755
17756@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17757
17758 #include <linux/vmalloc.h>
17759
17760+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17761+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17762+{
17763+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17764+}
17765+
17766+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17767+{
17768+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17769+}
17770+
17771 /*
17772 * Convert a virtual cached pointer to an uncached pointer
17773 */
17774diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17775index 0a8b519..80e7d5b 100644
17776--- a/arch/x86/include/asm/irqflags.h
17777+++ b/arch/x86/include/asm/irqflags.h
17778@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17779 sti; \
17780 sysexit
17781
17782+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17783+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17784+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17785+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17786+
17787 #else
17788 #define INTERRUPT_RETURN iret
17789 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17790diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17791index 53cdfb2..d1369e6 100644
17792--- a/arch/x86/include/asm/kprobes.h
17793+++ b/arch/x86/include/asm/kprobes.h
17794@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17795 #define RELATIVEJUMP_SIZE 5
17796 #define RELATIVECALL_OPCODE 0xe8
17797 #define RELATIVE_ADDR_SIZE 4
17798-#define MAX_STACK_SIZE 64
17799-#define MIN_STACK_SIZE(ADDR) \
17800- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17801- THREAD_SIZE - (unsigned long)(ADDR))) \
17802- ? (MAX_STACK_SIZE) \
17803- : (((unsigned long)current_thread_info()) + \
17804- THREAD_SIZE - (unsigned long)(ADDR)))
17805+#define MAX_STACK_SIZE 64UL
17806+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17807
17808 #define flush_insn_slot(p) do { } while (0)
17809
17810diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17811index 92d3486..0d47ae1 100644
17812--- a/arch/x86/include/asm/kvm_host.h
17813+++ b/arch/x86/include/asm/kvm_host.h
17814@@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
17815 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
17816 }
17817
17818+static inline u64 get_canonical(u64 la)
17819+{
17820+ return ((int64_t)la << 16) >> 16;
17821+}
17822+
17823+static inline bool is_noncanonical_address(u64 la)
17824+{
17825+#ifdef CONFIG_X86_64
17826+ return get_canonical(la) != la;
17827+#else
17828+ return false;
17829+#endif
17830+}
17831+
17832 #define TSS_IOPB_BASE_OFFSET 0x66
17833 #define TSS_BASE_SIZE 0x68
17834 #define TSS_IOPB_SIZE (65536 / 8)
17835@@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
17836 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
17837
17838 void kvm_define_shared_msr(unsigned index, u32 msr);
17839-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
17840+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
17841
17842 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
17843
17844diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17845index 4ad6560..75c7bdd 100644
17846--- a/arch/x86/include/asm/local.h
17847+++ b/arch/x86/include/asm/local.h
17848@@ -10,33 +10,97 @@ typedef struct {
17849 atomic_long_t a;
17850 } local_t;
17851
17852+typedef struct {
17853+ atomic_long_unchecked_t a;
17854+} local_unchecked_t;
17855+
17856 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17857
17858 #define local_read(l) atomic_long_read(&(l)->a)
17859+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17860 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17861+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17862
17863 static inline void local_inc(local_t *l)
17864 {
17865- asm volatile(_ASM_INC "%0"
17866+ asm volatile(_ASM_INC "%0\n"
17867+
17868+#ifdef CONFIG_PAX_REFCOUNT
17869+ "jno 0f\n"
17870+ _ASM_DEC "%0\n"
17871+ "int $4\n0:\n"
17872+ _ASM_EXTABLE(0b, 0b)
17873+#endif
17874+
17875+ : "+m" (l->a.counter));
17876+}
17877+
17878+static inline void local_inc_unchecked(local_unchecked_t *l)
17879+{
17880+ asm volatile(_ASM_INC "%0\n"
17881 : "+m" (l->a.counter));
17882 }
17883
17884 static inline void local_dec(local_t *l)
17885 {
17886- asm volatile(_ASM_DEC "%0"
17887+ asm volatile(_ASM_DEC "%0\n"
17888+
17889+#ifdef CONFIG_PAX_REFCOUNT
17890+ "jno 0f\n"
17891+ _ASM_INC "%0\n"
17892+ "int $4\n0:\n"
17893+ _ASM_EXTABLE(0b, 0b)
17894+#endif
17895+
17896+ : "+m" (l->a.counter));
17897+}
17898+
17899+static inline void local_dec_unchecked(local_unchecked_t *l)
17900+{
17901+ asm volatile(_ASM_DEC "%0\n"
17902 : "+m" (l->a.counter));
17903 }
17904
17905 static inline void local_add(long i, local_t *l)
17906 {
17907- asm volatile(_ASM_ADD "%1,%0"
17908+ asm volatile(_ASM_ADD "%1,%0\n"
17909+
17910+#ifdef CONFIG_PAX_REFCOUNT
17911+ "jno 0f\n"
17912+ _ASM_SUB "%1,%0\n"
17913+ "int $4\n0:\n"
17914+ _ASM_EXTABLE(0b, 0b)
17915+#endif
17916+
17917+ : "+m" (l->a.counter)
17918+ : "ir" (i));
17919+}
17920+
17921+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17922+{
17923+ asm volatile(_ASM_ADD "%1,%0\n"
17924 : "+m" (l->a.counter)
17925 : "ir" (i));
17926 }
17927
17928 static inline void local_sub(long i, local_t *l)
17929 {
17930- asm volatile(_ASM_SUB "%1,%0"
17931+ asm volatile(_ASM_SUB "%1,%0\n"
17932+
17933+#ifdef CONFIG_PAX_REFCOUNT
17934+ "jno 0f\n"
17935+ _ASM_ADD "%1,%0\n"
17936+ "int $4\n0:\n"
17937+ _ASM_EXTABLE(0b, 0b)
17938+#endif
17939+
17940+ : "+m" (l->a.counter)
17941+ : "ir" (i));
17942+}
17943+
17944+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17945+{
17946+ asm volatile(_ASM_SUB "%1,%0\n"
17947 : "+m" (l->a.counter)
17948 : "ir" (i));
17949 }
17950@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17951 */
17952 static inline int local_sub_and_test(long i, local_t *l)
17953 {
17954- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17955+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17956 }
17957
17958 /**
17959@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17960 */
17961 static inline int local_dec_and_test(local_t *l)
17962 {
17963- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17964+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17965 }
17966
17967 /**
17968@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17969 */
17970 static inline int local_inc_and_test(local_t *l)
17971 {
17972- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17973+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17974 }
17975
17976 /**
17977@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17978 */
17979 static inline int local_add_negative(long i, local_t *l)
17980 {
17981- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17982+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17983 }
17984
17985 /**
17986@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17987 static inline long local_add_return(long i, local_t *l)
17988 {
17989 long __i = i;
17990+ asm volatile(_ASM_XADD "%0, %1\n"
17991+
17992+#ifdef CONFIG_PAX_REFCOUNT
17993+ "jno 0f\n"
17994+ _ASM_MOV "%0,%1\n"
17995+ "int $4\n0:\n"
17996+ _ASM_EXTABLE(0b, 0b)
17997+#endif
17998+
17999+ : "+r" (i), "+m" (l->a.counter)
18000+ : : "memory");
18001+ return i + __i;
18002+}
18003+
18004+/**
18005+ * local_add_return_unchecked - add and return
18006+ * @i: integer value to add
18007+ * @l: pointer to type local_unchecked_t
18008+ *
18009+ * Atomically adds @i to @l and returns @i + @l
18010+ */
18011+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
18012+{
18013+ long __i = i;
18014 asm volatile(_ASM_XADD "%0, %1;"
18015 : "+r" (i), "+m" (l->a.counter)
18016 : : "memory");
18017@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
18018
18019 #define local_cmpxchg(l, o, n) \
18020 (cmpxchg_local(&((l)->a.counter), (o), (n)))
18021+#define local_cmpxchg_unchecked(l, o, n) \
18022+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
18023 /* Always has a lock prefix */
18024 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
18025
18026diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
18027new file mode 100644
18028index 0000000..2bfd3ba
18029--- /dev/null
18030+++ b/arch/x86/include/asm/mman.h
18031@@ -0,0 +1,15 @@
18032+#ifndef _X86_MMAN_H
18033+#define _X86_MMAN_H
18034+
18035+#include <uapi/asm/mman.h>
18036+
18037+#ifdef __KERNEL__
18038+#ifndef __ASSEMBLY__
18039+#ifdef CONFIG_X86_32
18040+#define arch_mmap_check i386_mmap_check
18041+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
18042+#endif
18043+#endif
18044+#endif
18045+
18046+#endif /* X86_MMAN_H */
18047diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
18048index 876e74e..e20bfb1 100644
18049--- a/arch/x86/include/asm/mmu.h
18050+++ b/arch/x86/include/asm/mmu.h
18051@@ -9,7 +9,7 @@
18052 * we put the segment information here.
18053 */
18054 typedef struct {
18055- void *ldt;
18056+ struct desc_struct *ldt;
18057 int size;
18058
18059 #ifdef CONFIG_X86_64
18060@@ -18,7 +18,19 @@ typedef struct {
18061 #endif
18062
18063 struct mutex lock;
18064- void __user *vdso;
18065+ unsigned long vdso;
18066+
18067+#ifdef CONFIG_X86_32
18068+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18069+ unsigned long user_cs_base;
18070+ unsigned long user_cs_limit;
18071+
18072+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18073+ cpumask_t cpu_user_cs_mask;
18074+#endif
18075+
18076+#endif
18077+#endif
18078 } mm_context_t;
18079
18080 #ifdef CONFIG_SMP
18081diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
18082index 166af2a..648c200 100644
18083--- a/arch/x86/include/asm/mmu_context.h
18084+++ b/arch/x86/include/asm/mmu_context.h
18085@@ -28,6 +28,20 @@ void destroy_context(struct mm_struct *mm);
18086
18087 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
18088 {
18089+
18090+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18091+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
18092+ unsigned int i;
18093+ pgd_t *pgd;
18094+
18095+ pax_open_kernel();
18096+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
18097+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
18098+ set_pgd_batched(pgd+i, native_make_pgd(0));
18099+ pax_close_kernel();
18100+ }
18101+#endif
18102+
18103 #ifdef CONFIG_SMP
18104 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
18105 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
18106@@ -38,16 +52,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18107 struct task_struct *tsk)
18108 {
18109 unsigned cpu = smp_processor_id();
18110+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18111+ int tlbstate = TLBSTATE_OK;
18112+#endif
18113
18114 if (likely(prev != next)) {
18115 #ifdef CONFIG_SMP
18116+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18117+ tlbstate = this_cpu_read(cpu_tlbstate.state);
18118+#endif
18119 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18120 this_cpu_write(cpu_tlbstate.active_mm, next);
18121 #endif
18122 cpumask_set_cpu(cpu, mm_cpumask(next));
18123
18124 /* Re-load page tables */
18125+#ifdef CONFIG_PAX_PER_CPU_PGD
18126+ pax_open_kernel();
18127+
18128+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18129+ if (static_cpu_has(X86_FEATURE_PCID))
18130+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18131+ else
18132+#endif
18133+
18134+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18135+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18136+ pax_close_kernel();
18137+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18138+
18139+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18140+ if (static_cpu_has(X86_FEATURE_PCID)) {
18141+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18142+ u64 descriptor[2];
18143+ descriptor[0] = PCID_USER;
18144+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18145+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18146+ descriptor[0] = PCID_KERNEL;
18147+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18148+ }
18149+ } else {
18150+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18151+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18152+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18153+ else
18154+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18155+ }
18156+ } else
18157+#endif
18158+
18159+ load_cr3(get_cpu_pgd(cpu, kernel));
18160+#else
18161 load_cr3(next->pgd);
18162+#endif
18163 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18164
18165 /* Stop flush ipis for the previous mm */
18166@@ -56,9 +113,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18167 /* Load the LDT, if the LDT is different: */
18168 if (unlikely(prev->context.ldt != next->context.ldt))
18169 load_LDT_nolock(&next->context);
18170+
18171+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18172+ if (!(__supported_pte_mask & _PAGE_NX)) {
18173+ smp_mb__before_atomic();
18174+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
18175+ smp_mb__after_atomic();
18176+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18177+ }
18178+#endif
18179+
18180+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18181+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
18182+ prev->context.user_cs_limit != next->context.user_cs_limit))
18183+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18184+#ifdef CONFIG_SMP
18185+ else if (unlikely(tlbstate != TLBSTATE_OK))
18186+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18187+#endif
18188+#endif
18189+
18190 }
18191+ else {
18192+
18193+#ifdef CONFIG_PAX_PER_CPU_PGD
18194+ pax_open_kernel();
18195+
18196+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18197+ if (static_cpu_has(X86_FEATURE_PCID))
18198+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18199+ else
18200+#endif
18201+
18202+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18203+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18204+ pax_close_kernel();
18205+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18206+
18207+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18208+ if (static_cpu_has(X86_FEATURE_PCID)) {
18209+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18210+ u64 descriptor[2];
18211+ descriptor[0] = PCID_USER;
18212+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18213+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18214+ descriptor[0] = PCID_KERNEL;
18215+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18216+ }
18217+ } else {
18218+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18219+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18220+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18221+ else
18222+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18223+ }
18224+ } else
18225+#endif
18226+
18227+ load_cr3(get_cpu_pgd(cpu, kernel));
18228+#endif
18229+
18230 #ifdef CONFIG_SMP
18231- else {
18232 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18233 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
18234
18235@@ -75,12 +190,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18236 * tlb flush IPI delivery. We must reload CR3
18237 * to make sure to use no freed page tables.
18238 */
18239+
18240+#ifndef CONFIG_PAX_PER_CPU_PGD
18241 load_cr3(next->pgd);
18242 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18243+#endif
18244+
18245 load_LDT_nolock(&next->context);
18246+
18247+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18248+ if (!(__supported_pte_mask & _PAGE_NX))
18249+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18250+#endif
18251+
18252+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18253+#ifdef CONFIG_PAX_PAGEEXEC
18254+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
18255+#endif
18256+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18257+#endif
18258+
18259 }
18260+#endif
18261 }
18262-#endif
18263 }
18264
18265 #define activate_mm(prev, next) \
18266diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
18267index e3b7819..b257c64 100644
18268--- a/arch/x86/include/asm/module.h
18269+++ b/arch/x86/include/asm/module.h
18270@@ -5,6 +5,7 @@
18271
18272 #ifdef CONFIG_X86_64
18273 /* X86_64 does not define MODULE_PROC_FAMILY */
18274+#define MODULE_PROC_FAMILY ""
18275 #elif defined CONFIG_M486
18276 #define MODULE_PROC_FAMILY "486 "
18277 #elif defined CONFIG_M586
18278@@ -57,8 +58,20 @@
18279 #error unknown processor family
18280 #endif
18281
18282-#ifdef CONFIG_X86_32
18283-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
18284+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
18285+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
18286+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
18287+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
18288+#else
18289+#define MODULE_PAX_KERNEXEC ""
18290 #endif
18291
18292+#ifdef CONFIG_PAX_MEMORY_UDEREF
18293+#define MODULE_PAX_UDEREF "UDEREF "
18294+#else
18295+#define MODULE_PAX_UDEREF ""
18296+#endif
18297+
18298+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
18299+
18300 #endif /* _ASM_X86_MODULE_H */
18301diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
18302index 5f2fc44..106caa6 100644
18303--- a/arch/x86/include/asm/nmi.h
18304+++ b/arch/x86/include/asm/nmi.h
18305@@ -36,26 +36,35 @@ enum {
18306
18307 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
18308
18309+struct nmiaction;
18310+
18311+struct nmiwork {
18312+ const struct nmiaction *action;
18313+ u64 max_duration;
18314+ struct irq_work irq_work;
18315+};
18316+
18317 struct nmiaction {
18318 struct list_head list;
18319 nmi_handler_t handler;
18320- u64 max_duration;
18321- struct irq_work irq_work;
18322 unsigned long flags;
18323 const char *name;
18324-};
18325+ struct nmiwork *work;
18326+} __do_const;
18327
18328 #define register_nmi_handler(t, fn, fg, n, init...) \
18329 ({ \
18330- static struct nmiaction init fn##_na = { \
18331+ static struct nmiwork fn##_nw; \
18332+ static const struct nmiaction init fn##_na = { \
18333 .handler = (fn), \
18334 .name = (n), \
18335 .flags = (fg), \
18336+ .work = &fn##_nw, \
18337 }; \
18338 __register_nmi_handler((t), &fn##_na); \
18339 })
18340
18341-int __register_nmi_handler(unsigned int, struct nmiaction *);
18342+int __register_nmi_handler(unsigned int, const struct nmiaction *);
18343
18344 void unregister_nmi_handler(unsigned int, const char *);
18345
18346diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
18347index 802dde3..9183e68 100644
18348--- a/arch/x86/include/asm/page.h
18349+++ b/arch/x86/include/asm/page.h
18350@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18351 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
18352
18353 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
18354+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
18355
18356 #define __boot_va(x) __va(x)
18357 #define __boot_pa(x) __pa(x)
18358@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18359 * virt_to_page(kaddr) returns a valid pointer if and only if
18360 * virt_addr_valid(kaddr) returns true.
18361 */
18362-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18363 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
18364 extern bool __virt_addr_valid(unsigned long kaddr);
18365 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
18366
18367+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
18368+#define virt_to_page(kaddr) \
18369+ ({ \
18370+ const void *__kaddr = (const void *)(kaddr); \
18371+ BUG_ON(!virt_addr_valid(__kaddr)); \
18372+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
18373+ })
18374+#else
18375+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18376+#endif
18377+
18378 #endif /* __ASSEMBLY__ */
18379
18380 #include <asm-generic/memory_model.h>
18381diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
18382index f408caf..4a0455e 100644
18383--- a/arch/x86/include/asm/page_64.h
18384+++ b/arch/x86/include/asm/page_64.h
18385@@ -7,9 +7,9 @@
18386
18387 /* duplicated to the one in bootmem.h */
18388 extern unsigned long max_pfn;
18389-extern unsigned long phys_base;
18390+extern const unsigned long phys_base;
18391
18392-static inline unsigned long __phys_addr_nodebug(unsigned long x)
18393+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
18394 {
18395 unsigned long y = x - __START_KERNEL_map;
18396
18397diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
18398index cd6e1610..70f4418 100644
18399--- a/arch/x86/include/asm/paravirt.h
18400+++ b/arch/x86/include/asm/paravirt.h
18401@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
18402 return (pmd_t) { ret };
18403 }
18404
18405-static inline pmdval_t pmd_val(pmd_t pmd)
18406+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
18407 {
18408 pmdval_t ret;
18409
18410@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
18411 val);
18412 }
18413
18414+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18415+{
18416+ pgdval_t val = native_pgd_val(pgd);
18417+
18418+ if (sizeof(pgdval_t) > sizeof(long))
18419+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
18420+ val, (u64)val >> 32);
18421+ else
18422+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
18423+ val);
18424+}
18425+
18426 static inline void pgd_clear(pgd_t *pgdp)
18427 {
18428 set_pgd(pgdp, __pgd(0));
18429@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
18430 pv_mmu_ops.set_fixmap(idx, phys, flags);
18431 }
18432
18433+#ifdef CONFIG_PAX_KERNEXEC
18434+static inline unsigned long pax_open_kernel(void)
18435+{
18436+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
18437+}
18438+
18439+static inline unsigned long pax_close_kernel(void)
18440+{
18441+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
18442+}
18443+#else
18444+static inline unsigned long pax_open_kernel(void) { return 0; }
18445+static inline unsigned long pax_close_kernel(void) { return 0; }
18446+#endif
18447+
18448 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
18449
18450 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
18451@@ -906,7 +933,7 @@ extern void default_banner(void);
18452
18453 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
18454 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
18455-#define PARA_INDIRECT(addr) *%cs:addr
18456+#define PARA_INDIRECT(addr) *%ss:addr
18457 #endif
18458
18459 #define INTERRUPT_RETURN \
18460@@ -981,6 +1008,21 @@ extern void default_banner(void);
18461 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
18462 CLBR_NONE, \
18463 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
18464+
18465+#define GET_CR0_INTO_RDI \
18466+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
18467+ mov %rax,%rdi
18468+
18469+#define SET_RDI_INTO_CR0 \
18470+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18471+
18472+#define GET_CR3_INTO_RDI \
18473+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
18474+ mov %rax,%rdi
18475+
18476+#define SET_RDI_INTO_CR3 \
18477+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
18478+
18479 #endif /* CONFIG_X86_32 */
18480
18481 #endif /* __ASSEMBLY__ */
18482diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
18483index 7549b8b..f0edfda 100644
18484--- a/arch/x86/include/asm/paravirt_types.h
18485+++ b/arch/x86/include/asm/paravirt_types.h
18486@@ -84,7 +84,7 @@ struct pv_init_ops {
18487 */
18488 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
18489 unsigned long addr, unsigned len);
18490-};
18491+} __no_const __no_randomize_layout;
18492
18493
18494 struct pv_lazy_ops {
18495@@ -92,13 +92,13 @@ struct pv_lazy_ops {
18496 void (*enter)(void);
18497 void (*leave)(void);
18498 void (*flush)(void);
18499-};
18500+} __no_randomize_layout;
18501
18502 struct pv_time_ops {
18503 unsigned long long (*sched_clock)(void);
18504 unsigned long long (*steal_clock)(int cpu);
18505 unsigned long (*get_tsc_khz)(void);
18506-};
18507+} __no_const __no_randomize_layout;
18508
18509 struct pv_cpu_ops {
18510 /* hooks for various privileged instructions */
18511@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18512
18513 void (*start_context_switch)(struct task_struct *prev);
18514 void (*end_context_switch)(struct task_struct *next);
18515-};
18516+} __no_const __no_randomize_layout;
18517
18518 struct pv_irq_ops {
18519 /*
18520@@ -215,7 +215,7 @@ struct pv_irq_ops {
18521 #ifdef CONFIG_X86_64
18522 void (*adjust_exception_frame)(void);
18523 #endif
18524-};
18525+} __no_randomize_layout;
18526
18527 struct pv_apic_ops {
18528 #ifdef CONFIG_X86_LOCAL_APIC
18529@@ -223,7 +223,7 @@ struct pv_apic_ops {
18530 unsigned long start_eip,
18531 unsigned long start_esp);
18532 #endif
18533-};
18534+} __no_const __no_randomize_layout;
18535
18536 struct pv_mmu_ops {
18537 unsigned long (*read_cr2)(void);
18538@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18539 struct paravirt_callee_save make_pud;
18540
18541 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18542+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18543 #endif /* PAGETABLE_LEVELS == 4 */
18544 #endif /* PAGETABLE_LEVELS >= 3 */
18545
18546@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18547 an mfn. We can tell which is which from the index. */
18548 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18549 phys_addr_t phys, pgprot_t flags);
18550-};
18551+
18552+#ifdef CONFIG_PAX_KERNEXEC
18553+ unsigned long (*pax_open_kernel)(void);
18554+ unsigned long (*pax_close_kernel)(void);
18555+#endif
18556+
18557+} __no_randomize_layout;
18558
18559 struct arch_spinlock;
18560 #ifdef CONFIG_SMP
18561@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18562 struct pv_lock_ops {
18563 struct paravirt_callee_save lock_spinning;
18564 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18565-};
18566+} __no_randomize_layout;
18567
18568 /* This contains all the paravirt structures: we get a convenient
18569 * number for each function using the offset which we use to indicate
18570- * what to patch. */
18571+ * what to patch.
18572+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18573+ */
18574+
18575 struct paravirt_patch_template {
18576 struct pv_init_ops pv_init_ops;
18577 struct pv_time_ops pv_time_ops;
18578@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18579 struct pv_apic_ops pv_apic_ops;
18580 struct pv_mmu_ops pv_mmu_ops;
18581 struct pv_lock_ops pv_lock_ops;
18582-};
18583+} __no_randomize_layout;
18584
18585 extern struct pv_info pv_info;
18586 extern struct pv_init_ops pv_init_ops;
18587diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18588index c4412e9..90e88c5 100644
18589--- a/arch/x86/include/asm/pgalloc.h
18590+++ b/arch/x86/include/asm/pgalloc.h
18591@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18592 pmd_t *pmd, pte_t *pte)
18593 {
18594 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18595+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18596+}
18597+
18598+static inline void pmd_populate_user(struct mm_struct *mm,
18599+ pmd_t *pmd, pte_t *pte)
18600+{
18601+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18602 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18603 }
18604
18605@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18606
18607 #ifdef CONFIG_X86_PAE
18608 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18609+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18610+{
18611+ pud_populate(mm, pudp, pmd);
18612+}
18613 #else /* !CONFIG_X86_PAE */
18614 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18615 {
18616 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18617 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18618 }
18619+
18620+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18621+{
18622+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18623+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18624+}
18625 #endif /* CONFIG_X86_PAE */
18626
18627 #if PAGETABLE_LEVELS > 3
18628@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18629 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18630 }
18631
18632+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18633+{
18634+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18635+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18636+}
18637+
18638 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18639 {
18640 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18641diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18642index 206a87f..1623b06 100644
18643--- a/arch/x86/include/asm/pgtable-2level.h
18644+++ b/arch/x86/include/asm/pgtable-2level.h
18645@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18646
18647 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18648 {
18649+ pax_open_kernel();
18650 *pmdp = pmd;
18651+ pax_close_kernel();
18652 }
18653
18654 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18655diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18656index 81bb91b..9392125 100644
18657--- a/arch/x86/include/asm/pgtable-3level.h
18658+++ b/arch/x86/include/asm/pgtable-3level.h
18659@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18660
18661 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18662 {
18663+ pax_open_kernel();
18664 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18665+ pax_close_kernel();
18666 }
18667
18668 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18669 {
18670+ pax_open_kernel();
18671 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18672+ pax_close_kernel();
18673 }
18674
18675 /*
18676diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18677index aa97a07..5c53c32 100644
18678--- a/arch/x86/include/asm/pgtable.h
18679+++ b/arch/x86/include/asm/pgtable.h
18680@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18681
18682 #ifndef __PAGETABLE_PUD_FOLDED
18683 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18684+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18685 #define pgd_clear(pgd) native_pgd_clear(pgd)
18686 #endif
18687
18688@@ -83,12 +84,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18689
18690 #define arch_end_context_switch(prev) do {} while(0)
18691
18692+#define pax_open_kernel() native_pax_open_kernel()
18693+#define pax_close_kernel() native_pax_close_kernel()
18694 #endif /* CONFIG_PARAVIRT */
18695
18696+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18697+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18698+
18699+#ifdef CONFIG_PAX_KERNEXEC
18700+static inline unsigned long native_pax_open_kernel(void)
18701+{
18702+ unsigned long cr0;
18703+
18704+ preempt_disable();
18705+ barrier();
18706+ cr0 = read_cr0() ^ X86_CR0_WP;
18707+ BUG_ON(cr0 & X86_CR0_WP);
18708+ write_cr0(cr0);
18709+ barrier();
18710+ return cr0 ^ X86_CR0_WP;
18711+}
18712+
18713+static inline unsigned long native_pax_close_kernel(void)
18714+{
18715+ unsigned long cr0;
18716+
18717+ barrier();
18718+ cr0 = read_cr0() ^ X86_CR0_WP;
18719+ BUG_ON(!(cr0 & X86_CR0_WP));
18720+ write_cr0(cr0);
18721+ barrier();
18722+ preempt_enable_no_resched();
18723+ return cr0 ^ X86_CR0_WP;
18724+}
18725+#else
18726+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18727+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18728+#endif
18729+
18730 /*
18731 * The following only work if pte_present() is true.
18732 * Undefined behaviour if not..
18733 */
18734+static inline int pte_user(pte_t pte)
18735+{
18736+ return pte_val(pte) & _PAGE_USER;
18737+}
18738+
18739 static inline int pte_dirty(pte_t pte)
18740 {
18741 return pte_flags(pte) & _PAGE_DIRTY;
18742@@ -155,6 +197,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18743 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18744 }
18745
18746+static inline unsigned long pgd_pfn(pgd_t pgd)
18747+{
18748+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18749+}
18750+
18751 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18752
18753 static inline int pmd_large(pmd_t pte)
18754@@ -208,9 +255,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18755 return pte_clear_flags(pte, _PAGE_RW);
18756 }
18757
18758+static inline pte_t pte_mkread(pte_t pte)
18759+{
18760+ return __pte(pte_val(pte) | _PAGE_USER);
18761+}
18762+
18763 static inline pte_t pte_mkexec(pte_t pte)
18764 {
18765- return pte_clear_flags(pte, _PAGE_NX);
18766+#ifdef CONFIG_X86_PAE
18767+ if (__supported_pte_mask & _PAGE_NX)
18768+ return pte_clear_flags(pte, _PAGE_NX);
18769+ else
18770+#endif
18771+ return pte_set_flags(pte, _PAGE_USER);
18772+}
18773+
18774+static inline pte_t pte_exprotect(pte_t pte)
18775+{
18776+#ifdef CONFIG_X86_PAE
18777+ if (__supported_pte_mask & _PAGE_NX)
18778+ return pte_set_flags(pte, _PAGE_NX);
18779+ else
18780+#endif
18781+ return pte_clear_flags(pte, _PAGE_USER);
18782 }
18783
18784 static inline pte_t pte_mkdirty(pte_t pte)
18785@@ -440,6 +507,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18786 #endif
18787
18788 #ifndef __ASSEMBLY__
18789+
18790+#ifdef CONFIG_PAX_PER_CPU_PGD
18791+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18792+enum cpu_pgd_type {kernel = 0, user = 1};
18793+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18794+{
18795+ return cpu_pgd[cpu][type];
18796+}
18797+#endif
18798+
18799 #include <linux/mm_types.h>
18800 #include <linux/mmdebug.h>
18801 #include <linux/log2.h>
18802@@ -586,7 +663,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18803 * Currently stuck as a macro due to indirect forward reference to
18804 * linux/mmzone.h's __section_mem_map_addr() definition:
18805 */
18806-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18807+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18808
18809 /* Find an entry in the second-level page table.. */
18810 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18811@@ -626,7 +703,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18812 * Currently stuck as a macro due to indirect forward reference to
18813 * linux/mmzone.h's __section_mem_map_addr() definition:
18814 */
18815-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18816+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18817
18818 /* to find an entry in a page-table-directory. */
18819 static inline unsigned long pud_index(unsigned long address)
18820@@ -641,7 +718,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18821
18822 static inline int pgd_bad(pgd_t pgd)
18823 {
18824- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18825+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18826 }
18827
18828 static inline int pgd_none(pgd_t pgd)
18829@@ -664,7 +741,12 @@ static inline int pgd_none(pgd_t pgd)
18830 * pgd_offset() returns a (pgd_t *)
18831 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18832 */
18833-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18834+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18835+
18836+#ifdef CONFIG_PAX_PER_CPU_PGD
18837+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18838+#endif
18839+
18840 /*
18841 * a shortcut which implies the use of the kernel's pgd, instead
18842 * of a process's
18843@@ -675,6 +757,23 @@ static inline int pgd_none(pgd_t pgd)
18844 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18845 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18846
18847+#ifdef CONFIG_X86_32
18848+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18849+#else
18850+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18851+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18852+
18853+#ifdef CONFIG_PAX_MEMORY_UDEREF
18854+#ifdef __ASSEMBLY__
18855+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18856+#else
18857+extern unsigned long pax_user_shadow_base;
18858+extern pgdval_t clone_pgd_mask;
18859+#endif
18860+#endif
18861+
18862+#endif
18863+
18864 #ifndef __ASSEMBLY__
18865
18866 extern int direct_gbpages;
18867@@ -841,11 +940,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18868 * dst and src can be on the same page, but the range must not overlap,
18869 * and must not cross a page boundary.
18870 */
18871-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18872+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18873 {
18874- memcpy(dst, src, count * sizeof(pgd_t));
18875+ pax_open_kernel();
18876+ while (count--)
18877+ *dst++ = *src++;
18878+ pax_close_kernel();
18879 }
18880
18881+#ifdef CONFIG_PAX_PER_CPU_PGD
18882+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18883+#endif
18884+
18885+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18886+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18887+#else
18888+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18889+#endif
18890+
18891 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18892 static inline int page_level_shift(enum pg_level level)
18893 {
18894diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18895index 9ee3221..b979c6b 100644
18896--- a/arch/x86/include/asm/pgtable_32.h
18897+++ b/arch/x86/include/asm/pgtable_32.h
18898@@ -25,9 +25,6 @@
18899 struct mm_struct;
18900 struct vm_area_struct;
18901
18902-extern pgd_t swapper_pg_dir[1024];
18903-extern pgd_t initial_page_table[1024];
18904-
18905 static inline void pgtable_cache_init(void) { }
18906 static inline void check_pgt_cache(void) { }
18907 void paging_init(void);
18908@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18909 # include <asm/pgtable-2level.h>
18910 #endif
18911
18912+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18913+extern pgd_t initial_page_table[PTRS_PER_PGD];
18914+#ifdef CONFIG_X86_PAE
18915+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18916+#endif
18917+
18918 #if defined(CONFIG_HIGHPTE)
18919 #define pte_offset_map(dir, address) \
18920 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18921@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18922 /* Clear a kernel PTE and flush it from the TLB */
18923 #define kpte_clear_flush(ptep, vaddr) \
18924 do { \
18925+ pax_open_kernel(); \
18926 pte_clear(&init_mm, (vaddr), (ptep)); \
18927+ pax_close_kernel(); \
18928 __flush_tlb_one((vaddr)); \
18929 } while (0)
18930
18931 #endif /* !__ASSEMBLY__ */
18932
18933+#define HAVE_ARCH_UNMAPPED_AREA
18934+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18935+
18936 /*
18937 * kern_addr_valid() is (1) for FLATMEM and (0) for
18938 * SPARSEMEM and DISCONTIGMEM
18939diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18940index ed5903b..c7fe163 100644
18941--- a/arch/x86/include/asm/pgtable_32_types.h
18942+++ b/arch/x86/include/asm/pgtable_32_types.h
18943@@ -8,7 +8,7 @@
18944 */
18945 #ifdef CONFIG_X86_PAE
18946 # include <asm/pgtable-3level_types.h>
18947-# define PMD_SIZE (1UL << PMD_SHIFT)
18948+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18949 # define PMD_MASK (~(PMD_SIZE - 1))
18950 #else
18951 # include <asm/pgtable-2level_types.h>
18952@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18953 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18954 #endif
18955
18956+#ifdef CONFIG_PAX_KERNEXEC
18957+#ifndef __ASSEMBLY__
18958+extern unsigned char MODULES_EXEC_VADDR[];
18959+extern unsigned char MODULES_EXEC_END[];
18960+#endif
18961+#include <asm/boot.h>
18962+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18963+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18964+#else
18965+#define ktla_ktva(addr) (addr)
18966+#define ktva_ktla(addr) (addr)
18967+#endif
18968+
18969 #define MODULES_VADDR VMALLOC_START
18970 #define MODULES_END VMALLOC_END
18971 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18972diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18973index 3874693..d7906ac 100644
18974--- a/arch/x86/include/asm/pgtable_64.h
18975+++ b/arch/x86/include/asm/pgtable_64.h
18976@@ -16,11 +16,16 @@
18977
18978 extern pud_t level3_kernel_pgt[512];
18979 extern pud_t level3_ident_pgt[512];
18980+extern pud_t level3_vmalloc_start_pgt[512];
18981+extern pud_t level3_vmalloc_end_pgt[512];
18982+extern pud_t level3_vmemmap_pgt[512];
18983+extern pud_t level2_vmemmap_pgt[512];
18984 extern pmd_t level2_kernel_pgt[512];
18985 extern pmd_t level2_fixmap_pgt[512];
18986-extern pmd_t level2_ident_pgt[512];
18987+extern pmd_t level2_ident_pgt[512*2];
18988 extern pte_t level1_fixmap_pgt[512];
18989-extern pgd_t init_level4_pgt[];
18990+extern pte_t level1_vsyscall_pgt[512];
18991+extern pgd_t init_level4_pgt[512];
18992
18993 #define swapper_pg_dir init_level4_pgt
18994
18995@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18996
18997 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18998 {
18999+ pax_open_kernel();
19000 *pmdp = pmd;
19001+ pax_close_kernel();
19002 }
19003
19004 static inline void native_pmd_clear(pmd_t *pmd)
19005@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
19006
19007 static inline void native_set_pud(pud_t *pudp, pud_t pud)
19008 {
19009+ pax_open_kernel();
19010 *pudp = pud;
19011+ pax_close_kernel();
19012 }
19013
19014 static inline void native_pud_clear(pud_t *pud)
19015@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
19016
19017 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
19018 {
19019+ pax_open_kernel();
19020+ *pgdp = pgd;
19021+ pax_close_kernel();
19022+}
19023+
19024+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
19025+{
19026 *pgdp = pgd;
19027 }
19028
19029diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
19030index 7166e25..baaa6fe 100644
19031--- a/arch/x86/include/asm/pgtable_64_types.h
19032+++ b/arch/x86/include/asm/pgtable_64_types.h
19033@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
19034 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
19035 #define MODULES_END _AC(0xffffffffff000000, UL)
19036 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
19037+#define MODULES_EXEC_VADDR MODULES_VADDR
19038+#define MODULES_EXEC_END MODULES_END
19039 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
19040 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
19041
19042+#define ktla_ktva(addr) (addr)
19043+#define ktva_ktla(addr) (addr)
19044+
19045 #define EARLY_DYNAMIC_PAGE_TABLES 64
19046
19047 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
19048diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
19049index f216963..6bd7c21 100644
19050--- a/arch/x86/include/asm/pgtable_types.h
19051+++ b/arch/x86/include/asm/pgtable_types.h
19052@@ -111,8 +111,10 @@
19053
19054 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19055 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
19056-#else
19057+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
19058 #define _PAGE_NX (_AT(pteval_t, 0))
19059+#else
19060+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
19061 #endif
19062
19063 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
19064@@ -151,6 +153,9 @@
19065 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
19066 _PAGE_ACCESSED)
19067
19068+#define PAGE_READONLY_NOEXEC PAGE_READONLY
19069+#define PAGE_SHARED_NOEXEC PAGE_SHARED
19070+
19071 #define __PAGE_KERNEL_EXEC \
19072 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
19073 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
19074@@ -161,7 +166,7 @@
19075 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
19076 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
19077 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
19078-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
19079+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
19080 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
19081 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
19082 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
19083@@ -218,7 +223,7 @@
19084 #ifdef CONFIG_X86_64
19085 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
19086 #else
19087-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
19088+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19089 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19090 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
19091 #endif
19092@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
19093 {
19094 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
19095 }
19096+#endif
19097
19098+#if PAGETABLE_LEVELS == 3
19099+#include <asm-generic/pgtable-nopud.h>
19100+#endif
19101+
19102+#if PAGETABLE_LEVELS == 2
19103+#include <asm-generic/pgtable-nopmd.h>
19104+#endif
19105+
19106+#ifndef __ASSEMBLY__
19107 #if PAGETABLE_LEVELS > 3
19108 typedef struct { pudval_t pud; } pud_t;
19109
19110@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
19111 return pud.pud;
19112 }
19113 #else
19114-#include <asm-generic/pgtable-nopud.h>
19115-
19116 static inline pudval_t native_pud_val(pud_t pud)
19117 {
19118 return native_pgd_val(pud.pgd);
19119@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
19120 return pmd.pmd;
19121 }
19122 #else
19123-#include <asm-generic/pgtable-nopmd.h>
19124-
19125 static inline pmdval_t native_pmd_val(pmd_t pmd)
19126 {
19127 return native_pgd_val(pmd.pud.pgd);
19128@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
19129
19130 extern pteval_t __supported_pte_mask;
19131 extern void set_nx(void);
19132-extern int nx_enabled;
19133
19134 #define pgprot_writecombine pgprot_writecombine
19135 extern pgprot_t pgprot_writecombine(pgprot_t prot);
19136diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
19137index 7024c12..71c46b9 100644
19138--- a/arch/x86/include/asm/preempt.h
19139+++ b/arch/x86/include/asm/preempt.h
19140@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
19141 */
19142 static __always_inline bool __preempt_count_dec_and_test(void)
19143 {
19144- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
19145+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
19146 }
19147
19148 /*
19149diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
19150index eb71ec7..f06532a 100644
19151--- a/arch/x86/include/asm/processor.h
19152+++ b/arch/x86/include/asm/processor.h
19153@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
19154 /* Index into per_cpu list: */
19155 u16 cpu_index;
19156 u32 microcode;
19157-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
19158+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
19159
19160 #define X86_VENDOR_INTEL 0
19161 #define X86_VENDOR_CYRIX 1
19162@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
19163 : "memory");
19164 }
19165
19166+/* invpcid (%rdx),%rax */
19167+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
19168+
19169+#define INVPCID_SINGLE_ADDRESS 0UL
19170+#define INVPCID_SINGLE_CONTEXT 1UL
19171+#define INVPCID_ALL_GLOBAL 2UL
19172+#define INVPCID_ALL_NONGLOBAL 3UL
19173+
19174+#define PCID_KERNEL 0UL
19175+#define PCID_USER 1UL
19176+#define PCID_NOFLUSH (1UL << 63)
19177+
19178 static inline void load_cr3(pgd_t *pgdir)
19179 {
19180- write_cr3(__pa(pgdir));
19181+ write_cr3(__pa(pgdir) | PCID_KERNEL);
19182 }
19183
19184 #ifdef CONFIG_X86_32
19185@@ -282,7 +294,7 @@ struct tss_struct {
19186
19187 } ____cacheline_aligned;
19188
19189-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
19190+extern struct tss_struct init_tss[NR_CPUS];
19191
19192 /*
19193 * Save the original ist values for checking stack pointers during debugging
19194@@ -478,6 +490,7 @@ struct thread_struct {
19195 unsigned short ds;
19196 unsigned short fsindex;
19197 unsigned short gsindex;
19198+ unsigned short ss;
19199 #endif
19200 #ifdef CONFIG_X86_32
19201 unsigned long ip;
19202@@ -587,29 +600,8 @@ static inline void load_sp0(struct tss_struct *tss,
19203 extern unsigned long mmu_cr4_features;
19204 extern u32 *trampoline_cr4_features;
19205
19206-static inline void set_in_cr4(unsigned long mask)
19207-{
19208- unsigned long cr4;
19209-
19210- mmu_cr4_features |= mask;
19211- if (trampoline_cr4_features)
19212- *trampoline_cr4_features = mmu_cr4_features;
19213- cr4 = read_cr4();
19214- cr4 |= mask;
19215- write_cr4(cr4);
19216-}
19217-
19218-static inline void clear_in_cr4(unsigned long mask)
19219-{
19220- unsigned long cr4;
19221-
19222- mmu_cr4_features &= ~mask;
19223- if (trampoline_cr4_features)
19224- *trampoline_cr4_features = mmu_cr4_features;
19225- cr4 = read_cr4();
19226- cr4 &= ~mask;
19227- write_cr4(cr4);
19228-}
19229+extern void set_in_cr4(unsigned long mask);
19230+extern void clear_in_cr4(unsigned long mask);
19231
19232 typedef struct {
19233 unsigned long seg;
19234@@ -837,11 +829,18 @@ static inline void spin_lock_prefetch(const void *x)
19235 */
19236 #define TASK_SIZE PAGE_OFFSET
19237 #define TASK_SIZE_MAX TASK_SIZE
19238+
19239+#ifdef CONFIG_PAX_SEGMEXEC
19240+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
19241+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
19242+#else
19243 #define STACK_TOP TASK_SIZE
19244-#define STACK_TOP_MAX STACK_TOP
19245+#endif
19246+
19247+#define STACK_TOP_MAX TASK_SIZE
19248
19249 #define INIT_THREAD { \
19250- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19251+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19252 .vm86_info = NULL, \
19253 .sysenter_cs = __KERNEL_CS, \
19254 .io_bitmap_ptr = NULL, \
19255@@ -855,7 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
19256 */
19257 #define INIT_TSS { \
19258 .x86_tss = { \
19259- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19260+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19261 .ss0 = __KERNEL_DS, \
19262 .ss1 = __KERNEL_CS, \
19263 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
19264@@ -866,11 +865,7 @@ static inline void spin_lock_prefetch(const void *x)
19265 extern unsigned long thread_saved_pc(struct task_struct *tsk);
19266
19267 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
19268-#define KSTK_TOP(info) \
19269-({ \
19270- unsigned long *__ptr = (unsigned long *)(info); \
19271- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
19272-})
19273+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
19274
19275 /*
19276 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
19277@@ -885,7 +880,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19278 #define task_pt_regs(task) \
19279 ({ \
19280 struct pt_regs *__regs__; \
19281- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
19282+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
19283 __regs__ - 1; \
19284 })
19285
19286@@ -895,13 +890,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19287 /*
19288 * User space process size. 47bits minus one guard page.
19289 */
19290-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
19291+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
19292
19293 /* This decides where the kernel will search for a free chunk of vm
19294 * space during mmap's.
19295 */
19296 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
19297- 0xc0000000 : 0xFFFFe000)
19298+ 0xc0000000 : 0xFFFFf000)
19299
19300 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
19301 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
19302@@ -912,11 +907,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19303 #define STACK_TOP_MAX TASK_SIZE_MAX
19304
19305 #define INIT_THREAD { \
19306- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19307+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19308 }
19309
19310 #define INIT_TSS { \
19311- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19312+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19313 }
19314
19315 /*
19316@@ -944,6 +939,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
19317 */
19318 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
19319
19320+#ifdef CONFIG_PAX_SEGMEXEC
19321+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
19322+#endif
19323+
19324 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
19325
19326 /* Get/set a process' ability to use the timestamp counter instruction */
19327@@ -970,7 +969,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
19328 return 0;
19329 }
19330
19331-extern unsigned long arch_align_stack(unsigned long sp);
19332+#define arch_align_stack(x) ((x) & ~0xfUL)
19333 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
19334
19335 void default_idle(void);
19336@@ -980,6 +979,6 @@ bool xen_set_default_idle(void);
19337 #define xen_set_default_idle 0
19338 #endif
19339
19340-void stop_this_cpu(void *dummy);
19341+void stop_this_cpu(void *dummy) __noreturn;
19342 void df_debug(struct pt_regs *regs, long error_code);
19343 #endif /* _ASM_X86_PROCESSOR_H */
19344diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
19345index 6205f0c..688a3a9 100644
19346--- a/arch/x86/include/asm/ptrace.h
19347+++ b/arch/x86/include/asm/ptrace.h
19348@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
19349 }
19350
19351 /*
19352- * user_mode_vm(regs) determines whether a register set came from user mode.
19353+ * user_mode(regs) determines whether a register set came from user mode.
19354 * This is true if V8086 mode was enabled OR if the register set was from
19355 * protected mode with RPL-3 CS value. This tricky test checks that with
19356 * one comparison. Many places in the kernel can bypass this full check
19357- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
19358+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
19359+ * be used.
19360 */
19361-static inline int user_mode(struct pt_regs *regs)
19362+static inline int user_mode_novm(struct pt_regs *regs)
19363 {
19364 #ifdef CONFIG_X86_32
19365 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
19366 #else
19367- return !!(regs->cs & 3);
19368+ return !!(regs->cs & SEGMENT_RPL_MASK);
19369 #endif
19370 }
19371
19372-static inline int user_mode_vm(struct pt_regs *regs)
19373+static inline int user_mode(struct pt_regs *regs)
19374 {
19375 #ifdef CONFIG_X86_32
19376 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
19377 USER_RPL;
19378 #else
19379- return user_mode(regs);
19380+ return user_mode_novm(regs);
19381 #endif
19382 }
19383
19384@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
19385 #ifdef CONFIG_X86_64
19386 static inline bool user_64bit_mode(struct pt_regs *regs)
19387 {
19388+ unsigned long cs = regs->cs & 0xffff;
19389 #ifndef CONFIG_PARAVIRT
19390 /*
19391 * On non-paravirt systems, this is the only long mode CPL 3
19392 * selector. We do not allow long mode selectors in the LDT.
19393 */
19394- return regs->cs == __USER_CS;
19395+ return cs == __USER_CS;
19396 #else
19397 /* Headers are too twisted for this to go in paravirt.h. */
19398- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
19399+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
19400 #endif
19401 }
19402
19403@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
19404 * Traps from the kernel do not save sp and ss.
19405 * Use the helper function to retrieve sp.
19406 */
19407- if (offset == offsetof(struct pt_regs, sp) &&
19408- regs->cs == __KERNEL_CS)
19409- return kernel_stack_pointer(regs);
19410+ if (offset == offsetof(struct pt_regs, sp)) {
19411+ unsigned long cs = regs->cs & 0xffff;
19412+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
19413+ return kernel_stack_pointer(regs);
19414+ }
19415 #endif
19416 return *(unsigned long *)((unsigned long)regs + offset);
19417 }
19418diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
19419index ae0e241..e80b10b 100644
19420--- a/arch/x86/include/asm/qrwlock.h
19421+++ b/arch/x86/include/asm/qrwlock.h
19422@@ -7,8 +7,8 @@
19423 #define queue_write_unlock queue_write_unlock
19424 static inline void queue_write_unlock(struct qrwlock *lock)
19425 {
19426- barrier();
19427- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
19428+ barrier();
19429+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
19430 }
19431 #endif
19432
19433diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
19434index 9c6b890..5305f53 100644
19435--- a/arch/x86/include/asm/realmode.h
19436+++ b/arch/x86/include/asm/realmode.h
19437@@ -22,16 +22,14 @@ struct real_mode_header {
19438 #endif
19439 /* APM/BIOS reboot */
19440 u32 machine_real_restart_asm;
19441-#ifdef CONFIG_X86_64
19442 u32 machine_real_restart_seg;
19443-#endif
19444 };
19445
19446 /* This must match data at trampoline_32/64.S */
19447 struct trampoline_header {
19448 #ifdef CONFIG_X86_32
19449 u32 start;
19450- u16 gdt_pad;
19451+ u16 boot_cs;
19452 u16 gdt_limit;
19453 u32 gdt_base;
19454 #else
19455diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
19456index a82c4f1..ac45053 100644
19457--- a/arch/x86/include/asm/reboot.h
19458+++ b/arch/x86/include/asm/reboot.h
19459@@ -6,13 +6,13 @@
19460 struct pt_regs;
19461
19462 struct machine_ops {
19463- void (*restart)(char *cmd);
19464- void (*halt)(void);
19465- void (*power_off)(void);
19466+ void (* __noreturn restart)(char *cmd);
19467+ void (* __noreturn halt)(void);
19468+ void (* __noreturn power_off)(void);
19469 void (*shutdown)(void);
19470 void (*crash_shutdown)(struct pt_regs *);
19471- void (*emergency_restart)(void);
19472-};
19473+ void (* __noreturn emergency_restart)(void);
19474+} __no_const;
19475
19476 extern struct machine_ops machine_ops;
19477
19478diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
19479index 8f7866a..e442f20 100644
19480--- a/arch/x86/include/asm/rmwcc.h
19481+++ b/arch/x86/include/asm/rmwcc.h
19482@@ -3,7 +3,34 @@
19483
19484 #ifdef CC_HAVE_ASM_GOTO
19485
19486-#define __GEN_RMWcc(fullop, var, cc, ...) \
19487+#ifdef CONFIG_PAX_REFCOUNT
19488+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19489+do { \
19490+ asm_volatile_goto (fullop \
19491+ ";jno 0f\n" \
19492+ fullantiop \
19493+ ";int $4\n0:\n" \
19494+ _ASM_EXTABLE(0b, 0b) \
19495+ ";j" cc " %l[cc_label]" \
19496+ : : "m" (var), ## __VA_ARGS__ \
19497+ : "memory" : cc_label); \
19498+ return 0; \
19499+cc_label: \
19500+ return 1; \
19501+} while (0)
19502+#else
19503+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19504+do { \
19505+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19506+ : : "m" (var), ## __VA_ARGS__ \
19507+ : "memory" : cc_label); \
19508+ return 0; \
19509+cc_label: \
19510+ return 1; \
19511+} while (0)
19512+#endif
19513+
19514+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19515 do { \
19516 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19517 : : "m" (var), ## __VA_ARGS__ \
19518@@ -13,15 +40,46 @@ cc_label: \
19519 return 1; \
19520 } while (0)
19521
19522-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19523- __GEN_RMWcc(op " " arg0, var, cc)
19524+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19525+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19526
19527-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19528- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19529+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19530+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19531+
19532+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19533+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19534+
19535+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19536+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19537
19538 #else /* !CC_HAVE_ASM_GOTO */
19539
19540-#define __GEN_RMWcc(fullop, var, cc, ...) \
19541+#ifdef CONFIG_PAX_REFCOUNT
19542+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19543+do { \
19544+ char c; \
19545+ asm volatile (fullop \
19546+ ";jno 0f\n" \
19547+ fullantiop \
19548+ ";int $4\n0:\n" \
19549+ _ASM_EXTABLE(0b, 0b) \
19550+ "; set" cc " %1" \
19551+ : "+m" (var), "=qm" (c) \
19552+ : __VA_ARGS__ : "memory"); \
19553+ return c != 0; \
19554+} while (0)
19555+#else
19556+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19557+do { \
19558+ char c; \
19559+ asm volatile (fullop "; set" cc " %1" \
19560+ : "+m" (var), "=qm" (c) \
19561+ : __VA_ARGS__ : "memory"); \
19562+ return c != 0; \
19563+} while (0)
19564+#endif
19565+
19566+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19567 do { \
19568 char c; \
19569 asm volatile (fullop "; set" cc " %1" \
19570@@ -30,11 +88,17 @@ do { \
19571 return c != 0; \
19572 } while (0)
19573
19574-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19575- __GEN_RMWcc(op " " arg0, var, cc)
19576+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19577+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19578+
19579+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19580+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19581+
19582+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19583+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19584
19585-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19586- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19587+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19588+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19589
19590 #endif /* CC_HAVE_ASM_GOTO */
19591
19592diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19593index cad82c9..2e5c5c1 100644
19594--- a/arch/x86/include/asm/rwsem.h
19595+++ b/arch/x86/include/asm/rwsem.h
19596@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19597 {
19598 asm volatile("# beginning down_read\n\t"
19599 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19600+
19601+#ifdef CONFIG_PAX_REFCOUNT
19602+ "jno 0f\n"
19603+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19604+ "int $4\n0:\n"
19605+ _ASM_EXTABLE(0b, 0b)
19606+#endif
19607+
19608 /* adds 0x00000001 */
19609 " jns 1f\n"
19610 " call call_rwsem_down_read_failed\n"
19611@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19612 "1:\n\t"
19613 " mov %1,%2\n\t"
19614 " add %3,%2\n\t"
19615+
19616+#ifdef CONFIG_PAX_REFCOUNT
19617+ "jno 0f\n"
19618+ "sub %3,%2\n"
19619+ "int $4\n0:\n"
19620+ _ASM_EXTABLE(0b, 0b)
19621+#endif
19622+
19623 " jle 2f\n\t"
19624 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19625 " jnz 1b\n\t"
19626@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19627 long tmp;
19628 asm volatile("# beginning down_write\n\t"
19629 LOCK_PREFIX " xadd %1,(%2)\n\t"
19630+
19631+#ifdef CONFIG_PAX_REFCOUNT
19632+ "jno 0f\n"
19633+ "mov %1,(%2)\n"
19634+ "int $4\n0:\n"
19635+ _ASM_EXTABLE(0b, 0b)
19636+#endif
19637+
19638 /* adds 0xffff0001, returns the old value */
19639 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19640 /* was the active mask 0 before? */
19641@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19642 long tmp;
19643 asm volatile("# beginning __up_read\n\t"
19644 LOCK_PREFIX " xadd %1,(%2)\n\t"
19645+
19646+#ifdef CONFIG_PAX_REFCOUNT
19647+ "jno 0f\n"
19648+ "mov %1,(%2)\n"
19649+ "int $4\n0:\n"
19650+ _ASM_EXTABLE(0b, 0b)
19651+#endif
19652+
19653 /* subtracts 1, returns the old value */
19654 " jns 1f\n\t"
19655 " call call_rwsem_wake\n" /* expects old value in %edx */
19656@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19657 long tmp;
19658 asm volatile("# beginning __up_write\n\t"
19659 LOCK_PREFIX " xadd %1,(%2)\n\t"
19660+
19661+#ifdef CONFIG_PAX_REFCOUNT
19662+ "jno 0f\n"
19663+ "mov %1,(%2)\n"
19664+ "int $4\n0:\n"
19665+ _ASM_EXTABLE(0b, 0b)
19666+#endif
19667+
19668 /* subtracts 0xffff0001, returns the old value */
19669 " jns 1f\n\t"
19670 " call call_rwsem_wake\n" /* expects old value in %edx */
19671@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19672 {
19673 asm volatile("# beginning __downgrade_write\n\t"
19674 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19675+
19676+#ifdef CONFIG_PAX_REFCOUNT
19677+ "jno 0f\n"
19678+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19679+ "int $4\n0:\n"
19680+ _ASM_EXTABLE(0b, 0b)
19681+#endif
19682+
19683 /*
19684 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19685 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19686@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19687 */
19688 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19689 {
19690- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19691+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19692+
19693+#ifdef CONFIG_PAX_REFCOUNT
19694+ "jno 0f\n"
19695+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19696+ "int $4\n0:\n"
19697+ _ASM_EXTABLE(0b, 0b)
19698+#endif
19699+
19700 : "+m" (sem->count)
19701 : "er" (delta));
19702 }
19703@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19704 */
19705 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19706 {
19707- return delta + xadd(&sem->count, delta);
19708+ return delta + xadd_check_overflow(&sem->count, delta);
19709 }
19710
19711 #endif /* __KERNEL__ */
19712diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19713index 6f1c3a8..7744f19 100644
19714--- a/arch/x86/include/asm/segment.h
19715+++ b/arch/x86/include/asm/segment.h
19716@@ -64,10 +64,15 @@
19717 * 26 - ESPFIX small SS
19718 * 27 - per-cpu [ offset to per-cpu data area ]
19719 * 28 - stack_canary-20 [ for stack protector ]
19720- * 29 - unused
19721- * 30 - unused
19722+ * 29 - PCI BIOS CS
19723+ * 30 - PCI BIOS DS
19724 * 31 - TSS for double fault handler
19725 */
19726+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19727+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19728+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19729+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19730+
19731 #define GDT_ENTRY_TLS_MIN 6
19732 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19733
19734@@ -79,6 +84,8 @@
19735
19736 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19737
19738+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19739+
19740 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19741
19742 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19743@@ -104,6 +111,12 @@
19744 #define __KERNEL_STACK_CANARY 0
19745 #endif
19746
19747+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19748+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19749+
19750+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19751+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19752+
19753 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19754
19755 /*
19756@@ -141,7 +154,7 @@
19757 */
19758
19759 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19760-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19761+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19762
19763
19764 #else
19765@@ -165,6 +178,8 @@
19766 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19767 #define __USER32_DS __USER_DS
19768
19769+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19770+
19771 #define GDT_ENTRY_TSS 8 /* needs two entries */
19772 #define GDT_ENTRY_LDT 10 /* needs two entries */
19773 #define GDT_ENTRY_TLS_MIN 12
19774@@ -173,6 +188,8 @@
19775 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19776 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19777
19778+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19779+
19780 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19781 #define FS_TLS 0
19782 #define GS_TLS 1
19783@@ -180,12 +197,14 @@
19784 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19785 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19786
19787-#define GDT_ENTRIES 16
19788+#define GDT_ENTRIES 17
19789
19790 #endif
19791
19792 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19793+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19794 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19795+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19796 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19797 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19798 #ifndef CONFIG_PARAVIRT
19799@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19800 {
19801 unsigned long __limit;
19802 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19803- return __limit + 1;
19804+ return __limit;
19805 }
19806
19807 #endif /* !__ASSEMBLY__ */
19808diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19809index 8d3120f..352b440 100644
19810--- a/arch/x86/include/asm/smap.h
19811+++ b/arch/x86/include/asm/smap.h
19812@@ -25,11 +25,40 @@
19813
19814 #include <asm/alternative-asm.h>
19815
19816+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19817+#define ASM_PAX_OPEN_USERLAND \
19818+ 661: jmp 663f; \
19819+ .pushsection .altinstr_replacement, "a" ; \
19820+ 662: pushq %rax; nop; \
19821+ .popsection ; \
19822+ .pushsection .altinstructions, "a" ; \
19823+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19824+ .popsection ; \
19825+ call __pax_open_userland; \
19826+ popq %rax; \
19827+ 663:
19828+
19829+#define ASM_PAX_CLOSE_USERLAND \
19830+ 661: jmp 663f; \
19831+ .pushsection .altinstr_replacement, "a" ; \
19832+ 662: pushq %rax; nop; \
19833+ .popsection; \
19834+ .pushsection .altinstructions, "a" ; \
19835+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19836+ .popsection; \
19837+ call __pax_close_userland; \
19838+ popq %rax; \
19839+ 663:
19840+#else
19841+#define ASM_PAX_OPEN_USERLAND
19842+#define ASM_PAX_CLOSE_USERLAND
19843+#endif
19844+
19845 #ifdef CONFIG_X86_SMAP
19846
19847 #define ASM_CLAC \
19848 661: ASM_NOP3 ; \
19849- .pushsection .altinstr_replacement, "ax" ; \
19850+ .pushsection .altinstr_replacement, "a" ; \
19851 662: __ASM_CLAC ; \
19852 .popsection ; \
19853 .pushsection .altinstructions, "a" ; \
19854@@ -38,7 +67,7 @@
19855
19856 #define ASM_STAC \
19857 661: ASM_NOP3 ; \
19858- .pushsection .altinstr_replacement, "ax" ; \
19859+ .pushsection .altinstr_replacement, "a" ; \
19860 662: __ASM_STAC ; \
19861 .popsection ; \
19862 .pushsection .altinstructions, "a" ; \
19863@@ -56,6 +85,37 @@
19864
19865 #include <asm/alternative.h>
19866
19867+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19868+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19869+
19870+extern void __pax_open_userland(void);
19871+static __always_inline unsigned long pax_open_userland(void)
19872+{
19873+
19874+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19875+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19876+ :
19877+ : [open] "i" (__pax_open_userland)
19878+ : "memory", "rax");
19879+#endif
19880+
19881+ return 0;
19882+}
19883+
19884+extern void __pax_close_userland(void);
19885+static __always_inline unsigned long pax_close_userland(void)
19886+{
19887+
19888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19889+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19890+ :
19891+ : [close] "i" (__pax_close_userland)
19892+ : "memory", "rax");
19893+#endif
19894+
19895+ return 0;
19896+}
19897+
19898 #ifdef CONFIG_X86_SMAP
19899
19900 static __always_inline void clac(void)
19901diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19902index 8cd27e0..7f05ec8 100644
19903--- a/arch/x86/include/asm/smp.h
19904+++ b/arch/x86/include/asm/smp.h
19905@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19906 /* cpus sharing the last level cache: */
19907 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19908 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19909-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19910+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19911
19912 static inline struct cpumask *cpu_sibling_mask(int cpu)
19913 {
19914@@ -78,7 +78,7 @@ struct smp_ops {
19915
19916 void (*send_call_func_ipi)(const struct cpumask *mask);
19917 void (*send_call_func_single_ipi)(int cpu);
19918-};
19919+} __no_const;
19920
19921 /* Globals due to paravirt */
19922 extern void set_cpu_sibling_map(int cpu);
19923@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19924 extern int safe_smp_processor_id(void);
19925
19926 #elif defined(CONFIG_X86_64_SMP)
19927-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19928-
19929-#define stack_smp_processor_id() \
19930-({ \
19931- struct thread_info *ti; \
19932- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19933- ti->cpu; \
19934-})
19935+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19936+#define stack_smp_processor_id() raw_smp_processor_id()
19937 #define safe_smp_processor_id() smp_processor_id()
19938
19939 #endif
19940diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19941index 54f1c80..39362a5 100644
19942--- a/arch/x86/include/asm/spinlock.h
19943+++ b/arch/x86/include/asm/spinlock.h
19944@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19945 static inline void arch_read_lock(arch_rwlock_t *rw)
19946 {
19947 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19948+
19949+#ifdef CONFIG_PAX_REFCOUNT
19950+ "jno 0f\n"
19951+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19952+ "int $4\n0:\n"
19953+ _ASM_EXTABLE(0b, 0b)
19954+#endif
19955+
19956 "jns 1f\n"
19957 "call __read_lock_failed\n\t"
19958 "1:\n"
19959@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19960 static inline void arch_write_lock(arch_rwlock_t *rw)
19961 {
19962 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19963+
19964+#ifdef CONFIG_PAX_REFCOUNT
19965+ "jno 0f\n"
19966+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19967+ "int $4\n0:\n"
19968+ _ASM_EXTABLE(0b, 0b)
19969+#endif
19970+
19971 "jz 1f\n"
19972 "call __write_lock_failed\n\t"
19973 "1:\n"
19974@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19975
19976 static inline void arch_read_unlock(arch_rwlock_t *rw)
19977 {
19978- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19979+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19980+
19981+#ifdef CONFIG_PAX_REFCOUNT
19982+ "jno 0f\n"
19983+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19984+ "int $4\n0:\n"
19985+ _ASM_EXTABLE(0b, 0b)
19986+#endif
19987+
19988 :"+m" (rw->lock) : : "memory");
19989 }
19990
19991 static inline void arch_write_unlock(arch_rwlock_t *rw)
19992 {
19993- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19994+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19995+
19996+#ifdef CONFIG_PAX_REFCOUNT
19997+ "jno 0f\n"
19998+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19999+ "int $4\n0:\n"
20000+ _ASM_EXTABLE(0b, 0b)
20001+#endif
20002+
20003 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
20004 }
20005 #else
20006diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
20007index 6a99859..03cb807 100644
20008--- a/arch/x86/include/asm/stackprotector.h
20009+++ b/arch/x86/include/asm/stackprotector.h
20010@@ -47,7 +47,7 @@
20011 * head_32 for boot CPU and setup_per_cpu_areas() for others.
20012 */
20013 #define GDT_STACK_CANARY_INIT \
20014- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
20015+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
20016
20017 /*
20018 * Initialize the stackprotector canary value.
20019@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
20020
20021 static inline void load_stack_canary_segment(void)
20022 {
20023-#ifdef CONFIG_X86_32
20024+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
20025 asm volatile ("mov %0, %%gs" : : "r" (0));
20026 #endif
20027 }
20028diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
20029index 70bbe39..4ae2bd4 100644
20030--- a/arch/x86/include/asm/stacktrace.h
20031+++ b/arch/x86/include/asm/stacktrace.h
20032@@ -11,28 +11,20 @@
20033
20034 extern int kstack_depth_to_print;
20035
20036-struct thread_info;
20037+struct task_struct;
20038 struct stacktrace_ops;
20039
20040-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
20041- unsigned long *stack,
20042- unsigned long bp,
20043- const struct stacktrace_ops *ops,
20044- void *data,
20045- unsigned long *end,
20046- int *graph);
20047+typedef unsigned long walk_stack_t(struct task_struct *task,
20048+ void *stack_start,
20049+ unsigned long *stack,
20050+ unsigned long bp,
20051+ const struct stacktrace_ops *ops,
20052+ void *data,
20053+ unsigned long *end,
20054+ int *graph);
20055
20056-extern unsigned long
20057-print_context_stack(struct thread_info *tinfo,
20058- unsigned long *stack, unsigned long bp,
20059- const struct stacktrace_ops *ops, void *data,
20060- unsigned long *end, int *graph);
20061-
20062-extern unsigned long
20063-print_context_stack_bp(struct thread_info *tinfo,
20064- unsigned long *stack, unsigned long bp,
20065- const struct stacktrace_ops *ops, void *data,
20066- unsigned long *end, int *graph);
20067+extern walk_stack_t print_context_stack;
20068+extern walk_stack_t print_context_stack_bp;
20069
20070 /* Generic stack tracer with callbacks */
20071
20072@@ -40,7 +32,7 @@ struct stacktrace_ops {
20073 void (*address)(void *data, unsigned long address, int reliable);
20074 /* On negative return stop dumping */
20075 int (*stack)(void *data, char *name);
20076- walk_stack_t walk_stack;
20077+ walk_stack_t *walk_stack;
20078 };
20079
20080 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
20081diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
20082index d7f3b3b..3cc39f1 100644
20083--- a/arch/x86/include/asm/switch_to.h
20084+++ b/arch/x86/include/asm/switch_to.h
20085@@ -108,7 +108,7 @@ do { \
20086 "call __switch_to\n\t" \
20087 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
20088 __switch_canary \
20089- "movq %P[thread_info](%%rsi),%%r8\n\t" \
20090+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
20091 "movq %%rax,%%rdi\n\t" \
20092 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
20093 "jnz ret_from_fork\n\t" \
20094@@ -119,7 +119,7 @@ do { \
20095 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
20096 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
20097 [_tif_fork] "i" (_TIF_FORK), \
20098- [thread_info] "i" (offsetof(struct task_struct, stack)), \
20099+ [thread_info] "m" (current_tinfo), \
20100 [current_task] "m" (current_task) \
20101 __switch_canary_iparam \
20102 : "memory", "cc" __EXTRA_CLOBBER)
20103diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
20104index 8540538..4b0b5e9 100644
20105--- a/arch/x86/include/asm/thread_info.h
20106+++ b/arch/x86/include/asm/thread_info.h
20107@@ -24,7 +24,6 @@ struct exec_domain;
20108 #include <linux/atomic.h>
20109
20110 struct thread_info {
20111- struct task_struct *task; /* main task structure */
20112 struct exec_domain *exec_domain; /* execution domain */
20113 __u32 flags; /* low level flags */
20114 __u32 status; /* thread synchronous flags */
20115@@ -33,13 +32,13 @@ struct thread_info {
20116 mm_segment_t addr_limit;
20117 struct restart_block restart_block;
20118 void __user *sysenter_return;
20119+ unsigned long lowest_stack;
20120 unsigned int sig_on_uaccess_error:1;
20121 unsigned int uaccess_err:1; /* uaccess failed */
20122 };
20123
20124-#define INIT_THREAD_INFO(tsk) \
20125+#define INIT_THREAD_INFO \
20126 { \
20127- .task = &tsk, \
20128 .exec_domain = &default_exec_domain, \
20129 .flags = 0, \
20130 .cpu = 0, \
20131@@ -50,7 +49,7 @@ struct thread_info {
20132 }, \
20133 }
20134
20135-#define init_thread_info (init_thread_union.thread_info)
20136+#define init_thread_info (init_thread_union.stack)
20137 #define init_stack (init_thread_union.stack)
20138
20139 #else /* !__ASSEMBLY__ */
20140@@ -91,6 +90,7 @@ struct thread_info {
20141 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
20142 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
20143 #define TIF_X32 30 /* 32-bit native x86-64 binary */
20144+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
20145
20146 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
20147 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
20148@@ -115,17 +115,18 @@ struct thread_info {
20149 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
20150 #define _TIF_ADDR32 (1 << TIF_ADDR32)
20151 #define _TIF_X32 (1 << TIF_X32)
20152+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
20153
20154 /* work to do in syscall_trace_enter() */
20155 #define _TIF_WORK_SYSCALL_ENTRY \
20156 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
20157 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
20158- _TIF_NOHZ)
20159+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20160
20161 /* work to do in syscall_trace_leave() */
20162 #define _TIF_WORK_SYSCALL_EXIT \
20163 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
20164- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
20165+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
20166
20167 /* work to do on interrupt/exception return */
20168 #define _TIF_WORK_MASK \
20169@@ -136,7 +137,7 @@ struct thread_info {
20170 /* work to do on any return to user space */
20171 #define _TIF_ALLWORK_MASK \
20172 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
20173- _TIF_NOHZ)
20174+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20175
20176 /* Only used for 64 bit */
20177 #define _TIF_DO_NOTIFY_MASK \
20178@@ -151,7 +152,6 @@ struct thread_info {
20179 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
20180
20181 #define STACK_WARN (THREAD_SIZE/8)
20182-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
20183
20184 /*
20185 * macros/functions for gaining access to the thread information structure
20186@@ -162,26 +162,18 @@ struct thread_info {
20187
20188 DECLARE_PER_CPU(unsigned long, kernel_stack);
20189
20190+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
20191+
20192 static inline struct thread_info *current_thread_info(void)
20193 {
20194- struct thread_info *ti;
20195- ti = (void *)(this_cpu_read_stable(kernel_stack) +
20196- KERNEL_STACK_OFFSET - THREAD_SIZE);
20197- return ti;
20198+ return this_cpu_read_stable(current_tinfo);
20199 }
20200
20201 #else /* !__ASSEMBLY__ */
20202
20203 /* how to get the thread information struct from ASM */
20204 #define GET_THREAD_INFO(reg) \
20205- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
20206- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
20207-
20208-/*
20209- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
20210- * a certain register (to be used in assembler memory operands).
20211- */
20212-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
20213+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
20214
20215 #endif
20216
20217@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
20218 extern void arch_task_cache_init(void);
20219 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
20220 extern void arch_release_task_struct(struct task_struct *tsk);
20221+
20222+#define __HAVE_THREAD_FUNCTIONS
20223+#define task_thread_info(task) (&(task)->tinfo)
20224+#define task_stack_page(task) ((task)->stack)
20225+#define setup_thread_stack(p, org) do {} while (0)
20226+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
20227+
20228 #endif
20229 #endif /* _ASM_X86_THREAD_INFO_H */
20230diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
20231index 04905bf..1178cdf 100644
20232--- a/arch/x86/include/asm/tlbflush.h
20233+++ b/arch/x86/include/asm/tlbflush.h
20234@@ -17,18 +17,44 @@
20235
20236 static inline void __native_flush_tlb(void)
20237 {
20238+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20239+ u64 descriptor[2];
20240+
20241+ descriptor[0] = PCID_KERNEL;
20242+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
20243+ return;
20244+ }
20245+
20246+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20247+ if (static_cpu_has(X86_FEATURE_PCID)) {
20248+ unsigned int cpu = raw_get_cpu();
20249+
20250+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
20251+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
20252+ raw_put_cpu_no_resched();
20253+ return;
20254+ }
20255+#endif
20256+
20257 native_write_cr3(native_read_cr3());
20258 }
20259
20260 static inline void __native_flush_tlb_global_irq_disabled(void)
20261 {
20262- unsigned long cr4;
20263+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20264+ u64 descriptor[2];
20265
20266- cr4 = native_read_cr4();
20267- /* clear PGE */
20268- native_write_cr4(cr4 & ~X86_CR4_PGE);
20269- /* write old PGE again and flush TLBs */
20270- native_write_cr4(cr4);
20271+ descriptor[0] = PCID_KERNEL;
20272+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
20273+ } else {
20274+ unsigned long cr4;
20275+
20276+ cr4 = native_read_cr4();
20277+ /* clear PGE */
20278+ native_write_cr4(cr4 & ~X86_CR4_PGE);
20279+ /* write old PGE again and flush TLBs */
20280+ native_write_cr4(cr4);
20281+ }
20282 }
20283
20284 static inline void __native_flush_tlb_global(void)
20285@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
20286
20287 static inline void __native_flush_tlb_single(unsigned long addr)
20288 {
20289+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20290+ u64 descriptor[2];
20291+
20292+ descriptor[0] = PCID_KERNEL;
20293+ descriptor[1] = addr;
20294+
20295+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20296+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
20297+ if (addr < TASK_SIZE_MAX)
20298+ descriptor[1] += pax_user_shadow_base;
20299+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20300+ }
20301+
20302+ descriptor[0] = PCID_USER;
20303+ descriptor[1] = addr;
20304+#endif
20305+
20306+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20307+ return;
20308+ }
20309+
20310+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20311+ if (static_cpu_has(X86_FEATURE_PCID)) {
20312+ unsigned int cpu = raw_get_cpu();
20313+
20314+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
20315+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20316+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
20317+ raw_put_cpu_no_resched();
20318+
20319+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
20320+ addr += pax_user_shadow_base;
20321+ }
20322+#endif
20323+
20324 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20325 }
20326
20327diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
20328index 0d592e0..526f797 100644
20329--- a/arch/x86/include/asm/uaccess.h
20330+++ b/arch/x86/include/asm/uaccess.h
20331@@ -7,6 +7,7 @@
20332 #include <linux/compiler.h>
20333 #include <linux/thread_info.h>
20334 #include <linux/string.h>
20335+#include <linux/spinlock.h>
20336 #include <asm/asm.h>
20337 #include <asm/page.h>
20338 #include <asm/smap.h>
20339@@ -29,7 +30,12 @@
20340
20341 #define get_ds() (KERNEL_DS)
20342 #define get_fs() (current_thread_info()->addr_limit)
20343+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20344+void __set_fs(mm_segment_t x);
20345+void set_fs(mm_segment_t x);
20346+#else
20347 #define set_fs(x) (current_thread_info()->addr_limit = (x))
20348+#endif
20349
20350 #define segment_eq(a, b) ((a).seg == (b).seg)
20351
20352@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
20353 * checks that the pointer is in the user space range - after calling
20354 * this function, memory access functions may still return -EFAULT.
20355 */
20356-#define access_ok(type, addr, size) \
20357- likely(!__range_not_ok(addr, size, user_addr_max()))
20358+extern int _cond_resched(void);
20359+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
20360+#define access_ok(type, addr, size) \
20361+({ \
20362+ unsigned long __size = size; \
20363+ unsigned long __addr = (unsigned long)addr; \
20364+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
20365+ if (__ret_ao && __size) { \
20366+ unsigned long __addr_ao = __addr & PAGE_MASK; \
20367+ unsigned long __end_ao = __addr + __size - 1; \
20368+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
20369+ while (__addr_ao <= __end_ao) { \
20370+ char __c_ao; \
20371+ __addr_ao += PAGE_SIZE; \
20372+ if (__size > PAGE_SIZE) \
20373+ _cond_resched(); \
20374+ if (__get_user(__c_ao, (char __user *)__addr)) \
20375+ break; \
20376+ if (type != VERIFY_WRITE) { \
20377+ __addr = __addr_ao; \
20378+ continue; \
20379+ } \
20380+ if (__put_user(__c_ao, (char __user *)__addr)) \
20381+ break; \
20382+ __addr = __addr_ao; \
20383+ } \
20384+ } \
20385+ } \
20386+ __ret_ao; \
20387+})
20388
20389 /*
20390 * The exception table consists of pairs of addresses relative to the
20391@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20392 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
20393 __chk_user_ptr(ptr); \
20394 might_fault(); \
20395+ pax_open_userland(); \
20396 asm volatile("call __get_user_%P3" \
20397 : "=a" (__ret_gu), "=r" (__val_gu) \
20398 : "0" (ptr), "i" (sizeof(*(ptr)))); \
20399 (x) = (__typeof__(*(ptr))) __val_gu; \
20400+ pax_close_userland(); \
20401 __ret_gu; \
20402 })
20403
20404@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20405 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
20406 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
20407
20408-
20409+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20410+#define __copyuser_seg "gs;"
20411+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
20412+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
20413+#else
20414+#define __copyuser_seg
20415+#define __COPYUSER_SET_ES
20416+#define __COPYUSER_RESTORE_ES
20417+#endif
20418
20419 #ifdef CONFIG_X86_32
20420 #define __put_user_asm_u64(x, addr, err, errret) \
20421 asm volatile(ASM_STAC "\n" \
20422- "1: movl %%eax,0(%2)\n" \
20423- "2: movl %%edx,4(%2)\n" \
20424+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
20425+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
20426 "3: " ASM_CLAC "\n" \
20427 ".section .fixup,\"ax\"\n" \
20428 "4: movl %3,%0\n" \
20429@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20430
20431 #define __put_user_asm_ex_u64(x, addr) \
20432 asm volatile(ASM_STAC "\n" \
20433- "1: movl %%eax,0(%1)\n" \
20434- "2: movl %%edx,4(%1)\n" \
20435+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
20436+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
20437 "3: " ASM_CLAC "\n" \
20438 _ASM_EXTABLE_EX(1b, 2b) \
20439 _ASM_EXTABLE_EX(2b, 3b) \
20440@@ -257,7 +301,8 @@ extern void __put_user_8(void);
20441 __typeof__(*(ptr)) __pu_val; \
20442 __chk_user_ptr(ptr); \
20443 might_fault(); \
20444- __pu_val = x; \
20445+ __pu_val = (x); \
20446+ pax_open_userland(); \
20447 switch (sizeof(*(ptr))) { \
20448 case 1: \
20449 __put_user_x(1, __pu_val, ptr, __ret_pu); \
20450@@ -275,6 +320,7 @@ extern void __put_user_8(void);
20451 __put_user_x(X, __pu_val, ptr, __ret_pu); \
20452 break; \
20453 } \
20454+ pax_close_userland(); \
20455 __ret_pu; \
20456 })
20457
20458@@ -355,8 +401,10 @@ do { \
20459 } while (0)
20460
20461 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20462+do { \
20463+ pax_open_userland(); \
20464 asm volatile(ASM_STAC "\n" \
20465- "1: mov"itype" %2,%"rtype"1\n" \
20466+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
20467 "2: " ASM_CLAC "\n" \
20468 ".section .fixup,\"ax\"\n" \
20469 "3: mov %3,%0\n" \
20470@@ -364,8 +412,10 @@ do { \
20471 " jmp 2b\n" \
20472 ".previous\n" \
20473 _ASM_EXTABLE(1b, 3b) \
20474- : "=r" (err), ltype(x) \
20475- : "m" (__m(addr)), "i" (errret), "0" (err))
20476+ : "=r" (err), ltype (x) \
20477+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
20478+ pax_close_userland(); \
20479+} while (0)
20480
20481 #define __get_user_size_ex(x, ptr, size) \
20482 do { \
20483@@ -389,7 +439,7 @@ do { \
20484 } while (0)
20485
20486 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
20487- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
20488+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
20489 "2:\n" \
20490 _ASM_EXTABLE_EX(1b, 2b) \
20491 : ltype(x) : "m" (__m(addr)))
20492@@ -406,13 +456,24 @@ do { \
20493 int __gu_err; \
20494 unsigned long __gu_val; \
20495 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
20496- (x) = (__force __typeof__(*(ptr)))__gu_val; \
20497+ (x) = (__typeof__(*(ptr)))__gu_val; \
20498 __gu_err; \
20499 })
20500
20501 /* FIXME: this hack is definitely wrong -AK */
20502 struct __large_struct { unsigned long buf[100]; };
20503-#define __m(x) (*(struct __large_struct __user *)(x))
20504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20505+#define ____m(x) \
20506+({ \
20507+ unsigned long ____x = (unsigned long)(x); \
20508+ if (____x < pax_user_shadow_base) \
20509+ ____x += pax_user_shadow_base; \
20510+ (typeof(x))____x; \
20511+})
20512+#else
20513+#define ____m(x) (x)
20514+#endif
20515+#define __m(x) (*(struct __large_struct __user *)____m(x))
20516
20517 /*
20518 * Tell gcc we read from memory instead of writing: this is because
20519@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20520 * aliasing issues.
20521 */
20522 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20523+do { \
20524+ pax_open_userland(); \
20525 asm volatile(ASM_STAC "\n" \
20526- "1: mov"itype" %"rtype"1,%2\n" \
20527+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20528 "2: " ASM_CLAC "\n" \
20529 ".section .fixup,\"ax\"\n" \
20530 "3: mov %3,%0\n" \
20531@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20532 ".previous\n" \
20533 _ASM_EXTABLE(1b, 3b) \
20534 : "=r"(err) \
20535- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20536+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20537+ pax_close_userland(); \
20538+} while (0)
20539
20540 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20541- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20542+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20543 "2:\n" \
20544 _ASM_EXTABLE_EX(1b, 2b) \
20545 : : ltype(x), "m" (__m(addr)))
20546@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20547 */
20548 #define uaccess_try do { \
20549 current_thread_info()->uaccess_err = 0; \
20550+ pax_open_userland(); \
20551 stac(); \
20552 barrier();
20553
20554 #define uaccess_catch(err) \
20555 clac(); \
20556+ pax_close_userland(); \
20557 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20558 } while (0)
20559
20560@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20561 * On error, the variable @x is set to zero.
20562 */
20563
20564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20565+#define __get_user(x, ptr) get_user((x), (ptr))
20566+#else
20567 #define __get_user(x, ptr) \
20568 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20569+#endif
20570
20571 /**
20572 * __put_user: - Write a simple value into user space, with less checking.
20573@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20574 * Returns zero on success, or -EFAULT on error.
20575 */
20576
20577+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20578+#define __put_user(x, ptr) put_user((x), (ptr))
20579+#else
20580 #define __put_user(x, ptr) \
20581 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20582+#endif
20583
20584 #define __get_user_unaligned __get_user
20585 #define __put_user_unaligned __put_user
20586@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20587 #define get_user_ex(x, ptr) do { \
20588 unsigned long __gue_val; \
20589 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20590- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20591+ (x) = (__typeof__(*(ptr)))__gue_val; \
20592 } while (0)
20593
20594 #define put_user_try uaccess_try
20595@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20596 __typeof__(ptr) __uval = (uval); \
20597 __typeof__(*(ptr)) __old = (old); \
20598 __typeof__(*(ptr)) __new = (new); \
20599+ pax_open_userland(); \
20600 switch (size) { \
20601 case 1: \
20602 { \
20603 asm volatile("\t" ASM_STAC "\n" \
20604- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20605+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20606 "2:\t" ASM_CLAC "\n" \
20607 "\t.section .fixup, \"ax\"\n" \
20608 "3:\tmov %3, %0\n" \
20609 "\tjmp 2b\n" \
20610 "\t.previous\n" \
20611 _ASM_EXTABLE(1b, 3b) \
20612- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20613+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20614 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20615 : "memory" \
20616 ); \
20617@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20618 case 2: \
20619 { \
20620 asm volatile("\t" ASM_STAC "\n" \
20621- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20622+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20623 "2:\t" ASM_CLAC "\n" \
20624 "\t.section .fixup, \"ax\"\n" \
20625 "3:\tmov %3, %0\n" \
20626 "\tjmp 2b\n" \
20627 "\t.previous\n" \
20628 _ASM_EXTABLE(1b, 3b) \
20629- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20630+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20631 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20632 : "memory" \
20633 ); \
20634@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20635 case 4: \
20636 { \
20637 asm volatile("\t" ASM_STAC "\n" \
20638- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20639+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20640 "2:\t" ASM_CLAC "\n" \
20641 "\t.section .fixup, \"ax\"\n" \
20642 "3:\tmov %3, %0\n" \
20643 "\tjmp 2b\n" \
20644 "\t.previous\n" \
20645 _ASM_EXTABLE(1b, 3b) \
20646- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20647+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20648 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20649 : "memory" \
20650 ); \
20651@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20652 __cmpxchg_wrong_size(); \
20653 \
20654 asm volatile("\t" ASM_STAC "\n" \
20655- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20656+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20657 "2:\t" ASM_CLAC "\n" \
20658 "\t.section .fixup, \"ax\"\n" \
20659 "3:\tmov %3, %0\n" \
20660 "\tjmp 2b\n" \
20661 "\t.previous\n" \
20662 _ASM_EXTABLE(1b, 3b) \
20663- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20664+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20665 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20666 : "memory" \
20667 ); \
20668@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20669 default: \
20670 __cmpxchg_wrong_size(); \
20671 } \
20672+ pax_close_userland(); \
20673 *__uval = __old; \
20674 __ret; \
20675 })
20676@@ -636,17 +713,6 @@ extern struct movsl_mask {
20677
20678 #define ARCH_HAS_NOCACHE_UACCESS 1
20679
20680-#ifdef CONFIG_X86_32
20681-# include <asm/uaccess_32.h>
20682-#else
20683-# include <asm/uaccess_64.h>
20684-#endif
20685-
20686-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20687- unsigned n);
20688-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20689- unsigned n);
20690-
20691 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20692 # define copy_user_diag __compiletime_error
20693 #else
20694@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20695 extern void copy_user_diag("copy_from_user() buffer size is too small")
20696 copy_from_user_overflow(void);
20697 extern void copy_user_diag("copy_to_user() buffer size is too small")
20698-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20699+copy_to_user_overflow(void);
20700
20701 #undef copy_user_diag
20702
20703@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20704
20705 extern void
20706 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20707-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20708+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20709 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20710
20711 #else
20712@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20713
20714 #endif
20715
20716+#ifdef CONFIG_X86_32
20717+# include <asm/uaccess_32.h>
20718+#else
20719+# include <asm/uaccess_64.h>
20720+#endif
20721+
20722 static inline unsigned long __must_check
20723 copy_from_user(void *to, const void __user *from, unsigned long n)
20724 {
20725- int sz = __compiletime_object_size(to);
20726+ size_t sz = __compiletime_object_size(to);
20727
20728 might_fault();
20729
20730@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20731 * case, and do only runtime checking for non-constant sizes.
20732 */
20733
20734- if (likely(sz < 0 || sz >= n))
20735- n = _copy_from_user(to, from, n);
20736- else if(__builtin_constant_p(n))
20737- copy_from_user_overflow();
20738- else
20739- __copy_from_user_overflow(sz, n);
20740+ if (likely(sz != (size_t)-1 && sz < n)) {
20741+ if(__builtin_constant_p(n))
20742+ copy_from_user_overflow();
20743+ else
20744+ __copy_from_user_overflow(sz, n);
20745+ } else if (access_ok(VERIFY_READ, from, n))
20746+ n = __copy_from_user(to, from, n);
20747+ else if ((long)n > 0)
20748+ memset(to, 0, n);
20749
20750 return n;
20751 }
20752@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20753 static inline unsigned long __must_check
20754 copy_to_user(void __user *to, const void *from, unsigned long n)
20755 {
20756- int sz = __compiletime_object_size(from);
20757+ size_t sz = __compiletime_object_size(from);
20758
20759 might_fault();
20760
20761 /* See the comment in copy_from_user() above. */
20762- if (likely(sz < 0 || sz >= n))
20763- n = _copy_to_user(to, from, n);
20764- else if(__builtin_constant_p(n))
20765- copy_to_user_overflow();
20766- else
20767- __copy_to_user_overflow(sz, n);
20768+ if (likely(sz != (size_t)-1 && sz < n)) {
20769+ if(__builtin_constant_p(n))
20770+ copy_to_user_overflow();
20771+ else
20772+ __copy_to_user_overflow(sz, n);
20773+ } else if (access_ok(VERIFY_WRITE, to, n))
20774+ n = __copy_to_user(to, from, n);
20775
20776 return n;
20777 }
20778diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20779index 3c03a5d..1071638 100644
20780--- a/arch/x86/include/asm/uaccess_32.h
20781+++ b/arch/x86/include/asm/uaccess_32.h
20782@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20783 static __always_inline unsigned long __must_check
20784 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20785 {
20786+ if ((long)n < 0)
20787+ return n;
20788+
20789+ check_object_size(from, n, true);
20790+
20791 if (__builtin_constant_p(n)) {
20792 unsigned long ret;
20793
20794@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20795 __copy_to_user(void __user *to, const void *from, unsigned long n)
20796 {
20797 might_fault();
20798+
20799 return __copy_to_user_inatomic(to, from, n);
20800 }
20801
20802 static __always_inline unsigned long
20803 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20804 {
20805+ if ((long)n < 0)
20806+ return n;
20807+
20808 /* Avoid zeroing the tail if the copy fails..
20809 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20810 * but as the zeroing behaviour is only significant when n is not
20811@@ -137,6 +146,12 @@ static __always_inline unsigned long
20812 __copy_from_user(void *to, const void __user *from, unsigned long n)
20813 {
20814 might_fault();
20815+
20816+ if ((long)n < 0)
20817+ return n;
20818+
20819+ check_object_size(to, n, false);
20820+
20821 if (__builtin_constant_p(n)) {
20822 unsigned long ret;
20823
20824@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20825 const void __user *from, unsigned long n)
20826 {
20827 might_fault();
20828+
20829+ if ((long)n < 0)
20830+ return n;
20831+
20832 if (__builtin_constant_p(n)) {
20833 unsigned long ret;
20834
20835@@ -181,7 +200,10 @@ static __always_inline unsigned long
20836 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20837 unsigned long n)
20838 {
20839- return __copy_from_user_ll_nocache_nozero(to, from, n);
20840+ if ((long)n < 0)
20841+ return n;
20842+
20843+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20844 }
20845
20846 #endif /* _ASM_X86_UACCESS_32_H */
20847diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20848index 12a26b9..206c200 100644
20849--- a/arch/x86/include/asm/uaccess_64.h
20850+++ b/arch/x86/include/asm/uaccess_64.h
20851@@ -10,6 +10,9 @@
20852 #include <asm/alternative.h>
20853 #include <asm/cpufeature.h>
20854 #include <asm/page.h>
20855+#include <asm/pgtable.h>
20856+
20857+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20858
20859 /*
20860 * Copy To/From Userspace
20861@@ -17,14 +20,14 @@
20862
20863 /* Handles exceptions in both to and from, but doesn't do access_ok */
20864 __must_check unsigned long
20865-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20866+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20867 __must_check unsigned long
20868-copy_user_generic_string(void *to, const void *from, unsigned len);
20869+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20870 __must_check unsigned long
20871-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20872+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20873
20874 static __always_inline __must_check unsigned long
20875-copy_user_generic(void *to, const void *from, unsigned len)
20876+copy_user_generic(void *to, const void *from, unsigned long len)
20877 {
20878 unsigned ret;
20879
20880@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20881 }
20882
20883 __must_check unsigned long
20884-copy_in_user(void __user *to, const void __user *from, unsigned len);
20885+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20886
20887 static __always_inline __must_check
20888-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20889+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20890 {
20891- int ret = 0;
20892+ size_t sz = __compiletime_object_size(dst);
20893+ unsigned ret = 0;
20894+
20895+ if (size > INT_MAX)
20896+ return size;
20897+
20898+ check_object_size(dst, size, false);
20899+
20900+#ifdef CONFIG_PAX_MEMORY_UDEREF
20901+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20902+ return size;
20903+#endif
20904+
20905+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20906+ if(__builtin_constant_p(size))
20907+ copy_from_user_overflow();
20908+ else
20909+ __copy_from_user_overflow(sz, size);
20910+ return size;
20911+ }
20912
20913 if (!__builtin_constant_p(size))
20914- return copy_user_generic(dst, (__force void *)src, size);
20915+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20916 switch (size) {
20917- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20918+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20919 ret, "b", "b", "=q", 1);
20920 return ret;
20921- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20922+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20923 ret, "w", "w", "=r", 2);
20924 return ret;
20925- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20926+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20927 ret, "l", "k", "=r", 4);
20928 return ret;
20929- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20930+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20931 ret, "q", "", "=r", 8);
20932 return ret;
20933 case 10:
20934- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20935+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20936 ret, "q", "", "=r", 10);
20937 if (unlikely(ret))
20938 return ret;
20939 __get_user_asm(*(u16 *)(8 + (char *)dst),
20940- (u16 __user *)(8 + (char __user *)src),
20941+ (const u16 __user *)(8 + (const char __user *)src),
20942 ret, "w", "w", "=r", 2);
20943 return ret;
20944 case 16:
20945- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20946+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20947 ret, "q", "", "=r", 16);
20948 if (unlikely(ret))
20949 return ret;
20950 __get_user_asm(*(u64 *)(8 + (char *)dst),
20951- (u64 __user *)(8 + (char __user *)src),
20952+ (const u64 __user *)(8 + (const char __user *)src),
20953 ret, "q", "", "=r", 8);
20954 return ret;
20955 default:
20956- return copy_user_generic(dst, (__force void *)src, size);
20957+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20958 }
20959 }
20960
20961 static __always_inline __must_check
20962-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20963+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20964 {
20965 might_fault();
20966 return __copy_from_user_nocheck(dst, src, size);
20967 }
20968
20969 static __always_inline __must_check
20970-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20971+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20972 {
20973- int ret = 0;
20974+ size_t sz = __compiletime_object_size(src);
20975+ unsigned ret = 0;
20976+
20977+ if (size > INT_MAX)
20978+ return size;
20979+
20980+ check_object_size(src, size, true);
20981+
20982+#ifdef CONFIG_PAX_MEMORY_UDEREF
20983+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20984+ return size;
20985+#endif
20986+
20987+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20988+ if(__builtin_constant_p(size))
20989+ copy_to_user_overflow();
20990+ else
20991+ __copy_to_user_overflow(sz, size);
20992+ return size;
20993+ }
20994
20995 if (!__builtin_constant_p(size))
20996- return copy_user_generic((__force void *)dst, src, size);
20997+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20998 switch (size) {
20999- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
21000+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
21001 ret, "b", "b", "iq", 1);
21002 return ret;
21003- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
21004+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
21005 ret, "w", "w", "ir", 2);
21006 return ret;
21007- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
21008+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
21009 ret, "l", "k", "ir", 4);
21010 return ret;
21011- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
21012+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21013 ret, "q", "", "er", 8);
21014 return ret;
21015 case 10:
21016- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21017+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21018 ret, "q", "", "er", 10);
21019 if (unlikely(ret))
21020 return ret;
21021 asm("":::"memory");
21022- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
21023+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
21024 ret, "w", "w", "ir", 2);
21025 return ret;
21026 case 16:
21027- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21028+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21029 ret, "q", "", "er", 16);
21030 if (unlikely(ret))
21031 return ret;
21032 asm("":::"memory");
21033- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
21034+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
21035 ret, "q", "", "er", 8);
21036 return ret;
21037 default:
21038- return copy_user_generic((__force void *)dst, src, size);
21039+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
21040 }
21041 }
21042
21043 static __always_inline __must_check
21044-int __copy_to_user(void __user *dst, const void *src, unsigned size)
21045+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
21046 {
21047 might_fault();
21048 return __copy_to_user_nocheck(dst, src, size);
21049 }
21050
21051 static __always_inline __must_check
21052-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21053+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21054 {
21055- int ret = 0;
21056+ unsigned ret = 0;
21057
21058 might_fault();
21059+
21060+ if (size > INT_MAX)
21061+ return size;
21062+
21063+#ifdef CONFIG_PAX_MEMORY_UDEREF
21064+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21065+ return size;
21066+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
21067+ return size;
21068+#endif
21069+
21070 if (!__builtin_constant_p(size))
21071- return copy_user_generic((__force void *)dst,
21072- (__force void *)src, size);
21073+ return copy_user_generic((__force_kernel void *)____m(dst),
21074+ (__force_kernel const void *)____m(src), size);
21075 switch (size) {
21076 case 1: {
21077 u8 tmp;
21078- __get_user_asm(tmp, (u8 __user *)src,
21079+ __get_user_asm(tmp, (const u8 __user *)src,
21080 ret, "b", "b", "=q", 1);
21081 if (likely(!ret))
21082 __put_user_asm(tmp, (u8 __user *)dst,
21083@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21084 }
21085 case 2: {
21086 u16 tmp;
21087- __get_user_asm(tmp, (u16 __user *)src,
21088+ __get_user_asm(tmp, (const u16 __user *)src,
21089 ret, "w", "w", "=r", 2);
21090 if (likely(!ret))
21091 __put_user_asm(tmp, (u16 __user *)dst,
21092@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21093
21094 case 4: {
21095 u32 tmp;
21096- __get_user_asm(tmp, (u32 __user *)src,
21097+ __get_user_asm(tmp, (const u32 __user *)src,
21098 ret, "l", "k", "=r", 4);
21099 if (likely(!ret))
21100 __put_user_asm(tmp, (u32 __user *)dst,
21101@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21102 }
21103 case 8: {
21104 u64 tmp;
21105- __get_user_asm(tmp, (u64 __user *)src,
21106+ __get_user_asm(tmp, (const u64 __user *)src,
21107 ret, "q", "", "=r", 8);
21108 if (likely(!ret))
21109 __put_user_asm(tmp, (u64 __user *)dst,
21110@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21111 return ret;
21112 }
21113 default:
21114- return copy_user_generic((__force void *)dst,
21115- (__force void *)src, size);
21116+ return copy_user_generic((__force_kernel void *)____m(dst),
21117+ (__force_kernel const void *)____m(src), size);
21118 }
21119 }
21120
21121-static __must_check __always_inline int
21122-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
21123+static __must_check __always_inline unsigned long
21124+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
21125 {
21126 return __copy_from_user_nocheck(dst, src, size);
21127 }
21128
21129-static __must_check __always_inline int
21130-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
21131+static __must_check __always_inline unsigned long
21132+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
21133 {
21134 return __copy_to_user_nocheck(dst, src, size);
21135 }
21136
21137-extern long __copy_user_nocache(void *dst, const void __user *src,
21138- unsigned size, int zerorest);
21139+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
21140+ unsigned long size, int zerorest);
21141
21142-static inline int
21143-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
21144+static inline unsigned long
21145+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
21146 {
21147 might_fault();
21148+
21149+ if (size > INT_MAX)
21150+ return size;
21151+
21152+#ifdef CONFIG_PAX_MEMORY_UDEREF
21153+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21154+ return size;
21155+#endif
21156+
21157 return __copy_user_nocache(dst, src, size, 1);
21158 }
21159
21160-static inline int
21161+static inline unsigned long
21162 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
21163- unsigned size)
21164+ unsigned long size)
21165 {
21166+ if (size > INT_MAX)
21167+ return size;
21168+
21169+#ifdef CONFIG_PAX_MEMORY_UDEREF
21170+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21171+ return size;
21172+#endif
21173+
21174 return __copy_user_nocache(dst, src, size, 0);
21175 }
21176
21177 unsigned long
21178-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
21179+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
21180
21181 #endif /* _ASM_X86_UACCESS_64_H */
21182diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
21183index 5b238981..77fdd78 100644
21184--- a/arch/x86/include/asm/word-at-a-time.h
21185+++ b/arch/x86/include/asm/word-at-a-time.h
21186@@ -11,7 +11,7 @@
21187 * and shift, for example.
21188 */
21189 struct word_at_a_time {
21190- const unsigned long one_bits, high_bits;
21191+ unsigned long one_bits, high_bits;
21192 };
21193
21194 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
21195diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
21196index e45e4da..44e8572 100644
21197--- a/arch/x86/include/asm/x86_init.h
21198+++ b/arch/x86/include/asm/x86_init.h
21199@@ -129,7 +129,7 @@ struct x86_init_ops {
21200 struct x86_init_timers timers;
21201 struct x86_init_iommu iommu;
21202 struct x86_init_pci pci;
21203-};
21204+} __no_const;
21205
21206 /**
21207 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
21208@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
21209 void (*setup_percpu_clockev)(void);
21210 void (*early_percpu_clock_init)(void);
21211 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
21212-};
21213+} __no_const;
21214
21215 struct timespec;
21216
21217@@ -168,7 +168,7 @@ struct x86_platform_ops {
21218 void (*save_sched_clock_state)(void);
21219 void (*restore_sched_clock_state)(void);
21220 void (*apic_post_init)(void);
21221-};
21222+} __no_const;
21223
21224 struct pci_dev;
21225 struct msi_msg;
21226@@ -185,7 +185,7 @@ struct x86_msi_ops {
21227 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
21228 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
21229 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
21230-};
21231+} __no_const;
21232
21233 struct IO_APIC_route_entry;
21234 struct io_apic_irq_attr;
21235@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
21236 unsigned int destination, int vector,
21237 struct io_apic_irq_attr *attr);
21238 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
21239-};
21240+} __no_const;
21241
21242 extern struct x86_init_ops x86_init;
21243 extern struct x86_cpuinit_ops x86_cpuinit;
21244diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
21245index c949923..c22bfa4 100644
21246--- a/arch/x86/include/asm/xen/page.h
21247+++ b/arch/x86/include/asm/xen/page.h
21248@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
21249 extern struct page *m2p_find_override(unsigned long mfn);
21250 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
21251
21252-static inline unsigned long pfn_to_mfn(unsigned long pfn)
21253+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
21254 {
21255 unsigned long mfn;
21256
21257diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
21258index 7e7a79a..0824666 100644
21259--- a/arch/x86/include/asm/xsave.h
21260+++ b/arch/x86/include/asm/xsave.h
21261@@ -228,12 +228,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21262 if (unlikely(err))
21263 return -EFAULT;
21264
21265+ pax_open_userland();
21266 __asm__ __volatile__(ASM_STAC "\n"
21267- "1:"XSAVE"\n"
21268+ "1:"
21269+ __copyuser_seg
21270+ XSAVE"\n"
21271 "2: " ASM_CLAC "\n"
21272 xstate_fault
21273 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
21274 : "memory");
21275+ pax_close_userland();
21276 return err;
21277 }
21278
21279@@ -243,16 +247,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21280 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
21281 {
21282 int err = 0;
21283- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
21284+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
21285 u32 lmask = mask;
21286 u32 hmask = mask >> 32;
21287
21288+ pax_open_userland();
21289 __asm__ __volatile__(ASM_STAC "\n"
21290- "1:"XRSTOR"\n"
21291+ "1:"
21292+ __copyuser_seg
21293+ XRSTOR"\n"
21294 "2: " ASM_CLAC "\n"
21295 xstate_fault
21296 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
21297 : "memory"); /* memory required? */
21298+ pax_close_userland();
21299 return err;
21300 }
21301
21302diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
21303index bbae024..e1528f9 100644
21304--- a/arch/x86/include/uapi/asm/e820.h
21305+++ b/arch/x86/include/uapi/asm/e820.h
21306@@ -63,7 +63,7 @@ struct e820map {
21307 #define ISA_START_ADDRESS 0xa0000
21308 #define ISA_END_ADDRESS 0x100000
21309
21310-#define BIOS_BEGIN 0x000a0000
21311+#define BIOS_BEGIN 0x000c0000
21312 #define BIOS_END 0x00100000
21313
21314 #define BIOS_ROM_BASE 0xffe00000
21315diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
21316index 7b0a55a..ad115bf 100644
21317--- a/arch/x86/include/uapi/asm/ptrace-abi.h
21318+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
21319@@ -49,7 +49,6 @@
21320 #define EFLAGS 144
21321 #define RSP 152
21322 #define SS 160
21323-#define ARGOFFSET R11
21324 #endif /* __ASSEMBLY__ */
21325
21326 /* top of stack page */
21327diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
21328index 0e79420..990a2fe 100644
21329--- a/arch/x86/include/uapi/asm/vmx.h
21330+++ b/arch/x86/include/uapi/asm/vmx.h
21331@@ -67,6 +67,7 @@
21332 #define EXIT_REASON_EPT_MISCONFIG 49
21333 #define EXIT_REASON_INVEPT 50
21334 #define EXIT_REASON_PREEMPTION_TIMER 52
21335+#define EXIT_REASON_INVVPID 53
21336 #define EXIT_REASON_WBINVD 54
21337 #define EXIT_REASON_XSETBV 55
21338 #define EXIT_REASON_APIC_WRITE 56
21339@@ -114,6 +115,7 @@
21340 { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
21341 { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
21342 { EXIT_REASON_INVD, "INVD" }, \
21343+ { EXIT_REASON_INVVPID, "INVVPID" }, \
21344 { EXIT_REASON_INVPCID, "INVPCID" }
21345
21346 #endif /* _UAPIVMX_H */
21347diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
21348index ada2e2d..ca69e16 100644
21349--- a/arch/x86/kernel/Makefile
21350+++ b/arch/x86/kernel/Makefile
21351@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
21352 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
21353 obj-$(CONFIG_IRQ_WORK) += irq_work.o
21354 obj-y += probe_roms.o
21355-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
21356+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
21357 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
21358 obj-$(CONFIG_X86_64) += mcount_64.o
21359 obj-y += syscall_$(BITS).o vsyscall_gtod.o
21360diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
21361index b436fc7..1ba7044 100644
21362--- a/arch/x86/kernel/acpi/boot.c
21363+++ b/arch/x86/kernel/acpi/boot.c
21364@@ -1272,7 +1272,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
21365 * If your system is blacklisted here, but you find that acpi=force
21366 * works for you, please contact linux-acpi@vger.kernel.org
21367 */
21368-static struct dmi_system_id __initdata acpi_dmi_table[] = {
21369+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
21370 /*
21371 * Boxes that need ACPI disabled
21372 */
21373@@ -1347,7 +1347,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
21374 };
21375
21376 /* second table for DMI checks that should run after early-quirks */
21377-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
21378+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
21379 /*
21380 * HP laptops which use a DSDT reporting as HP/SB400/10000,
21381 * which includes some code which overrides all temperature
21382diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
21383index 3136820..e2c6577 100644
21384--- a/arch/x86/kernel/acpi/sleep.c
21385+++ b/arch/x86/kernel/acpi/sleep.c
21386@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
21387 #else /* CONFIG_64BIT */
21388 #ifdef CONFIG_SMP
21389 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
21390+
21391+ pax_open_kernel();
21392 early_gdt_descr.address =
21393 (unsigned long)get_cpu_gdt_table(smp_processor_id());
21394+ pax_close_kernel();
21395+
21396 initial_gs = per_cpu_offset(smp_processor_id());
21397 #endif
21398 initial_code = (unsigned long)wakeup_long64;
21399diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
21400index 665c6b7..eae4d56 100644
21401--- a/arch/x86/kernel/acpi/wakeup_32.S
21402+++ b/arch/x86/kernel/acpi/wakeup_32.S
21403@@ -29,13 +29,11 @@ wakeup_pmode_return:
21404 # and restore the stack ... but you need gdt for this to work
21405 movl saved_context_esp, %esp
21406
21407- movl %cs:saved_magic, %eax
21408- cmpl $0x12345678, %eax
21409+ cmpl $0x12345678, saved_magic
21410 jne bogus_magic
21411
21412 # jump to place where we left off
21413- movl saved_eip, %eax
21414- jmp *%eax
21415+ jmp *(saved_eip)
21416
21417 bogus_magic:
21418 jmp bogus_magic
21419diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
21420index 703130f..27a155d 100644
21421--- a/arch/x86/kernel/alternative.c
21422+++ b/arch/x86/kernel/alternative.c
21423@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21424 */
21425 for (a = start; a < end; a++) {
21426 instr = (u8 *)&a->instr_offset + a->instr_offset;
21427+
21428+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21429+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21430+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21431+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21432+#endif
21433+
21434 replacement = (u8 *)&a->repl_offset + a->repl_offset;
21435 BUG_ON(a->replacementlen > a->instrlen);
21436 BUG_ON(a->instrlen > sizeof(insnbuf));
21437@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21438 add_nops(insnbuf + a->replacementlen,
21439 a->instrlen - a->replacementlen);
21440
21441+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21442+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21443+ instr = ktva_ktla(instr);
21444+#endif
21445+
21446 text_poke_early(instr, insnbuf, a->instrlen);
21447 }
21448 }
21449@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
21450 for (poff = start; poff < end; poff++) {
21451 u8 *ptr = (u8 *)poff + *poff;
21452
21453+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21454+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21455+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21456+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21457+#endif
21458+
21459 if (!*poff || ptr < text || ptr >= text_end)
21460 continue;
21461 /* turn DS segment override prefix into lock prefix */
21462- if (*ptr == 0x3e)
21463+ if (*ktla_ktva(ptr) == 0x3e)
21464 text_poke(ptr, ((unsigned char []){0xf0}), 1);
21465 }
21466 mutex_unlock(&text_mutex);
21467@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
21468 for (poff = start; poff < end; poff++) {
21469 u8 *ptr = (u8 *)poff + *poff;
21470
21471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21472+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21473+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21474+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21475+#endif
21476+
21477 if (!*poff || ptr < text || ptr >= text_end)
21478 continue;
21479 /* turn lock prefix into DS segment override prefix */
21480- if (*ptr == 0xf0)
21481+ if (*ktla_ktva(ptr) == 0xf0)
21482 text_poke(ptr, ((unsigned char []){0x3E}), 1);
21483 }
21484 mutex_unlock(&text_mutex);
21485@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
21486
21487 BUG_ON(p->len > MAX_PATCH_LEN);
21488 /* prep the buffer with the original instructions */
21489- memcpy(insnbuf, p->instr, p->len);
21490+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
21491 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
21492 (unsigned long)p->instr, p->len);
21493
21494@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
21495 if (!uniproc_patched || num_possible_cpus() == 1)
21496 free_init_pages("SMP alternatives",
21497 (unsigned long)__smp_locks,
21498- (unsigned long)__smp_locks_end);
21499+ PAGE_ALIGN((unsigned long)__smp_locks_end));
21500 #endif
21501
21502 apply_paravirt(__parainstructions, __parainstructions_end);
21503@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
21504 * instructions. And on the local CPU you need to be protected again NMI or MCE
21505 * handlers seeing an inconsistent instruction while you patch.
21506 */
21507-void *__init_or_module text_poke_early(void *addr, const void *opcode,
21508+void *__kprobes text_poke_early(void *addr, const void *opcode,
21509 size_t len)
21510 {
21511 unsigned long flags;
21512 local_irq_save(flags);
21513- memcpy(addr, opcode, len);
21514+
21515+ pax_open_kernel();
21516+ memcpy(ktla_ktva(addr), opcode, len);
21517 sync_core();
21518+ pax_close_kernel();
21519+
21520 local_irq_restore(flags);
21521 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21522 that causes hangs on some VIA CPUs. */
21523@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21524 */
21525 void *text_poke(void *addr, const void *opcode, size_t len)
21526 {
21527- unsigned long flags;
21528- char *vaddr;
21529+ unsigned char *vaddr = ktla_ktva(addr);
21530 struct page *pages[2];
21531- int i;
21532+ size_t i;
21533
21534 if (!core_kernel_text((unsigned long)addr)) {
21535- pages[0] = vmalloc_to_page(addr);
21536- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21537+ pages[0] = vmalloc_to_page(vaddr);
21538+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21539 } else {
21540- pages[0] = virt_to_page(addr);
21541+ pages[0] = virt_to_page(vaddr);
21542 WARN_ON(!PageReserved(pages[0]));
21543- pages[1] = virt_to_page(addr + PAGE_SIZE);
21544+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21545 }
21546 BUG_ON(!pages[0]);
21547- local_irq_save(flags);
21548- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21549- if (pages[1])
21550- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21551- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21552- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21553- clear_fixmap(FIX_TEXT_POKE0);
21554- if (pages[1])
21555- clear_fixmap(FIX_TEXT_POKE1);
21556- local_flush_tlb();
21557- sync_core();
21558- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21559- that causes hangs on some VIA CPUs. */
21560+ text_poke_early(addr, opcode, len);
21561 for (i = 0; i < len; i++)
21562- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21563- local_irq_restore(flags);
21564+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21565 return addr;
21566 }
21567
21568@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21569 if (likely(!bp_patching_in_progress))
21570 return 0;
21571
21572- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21573+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21574 return 0;
21575
21576 /* set up the specified breakpoint handler */
21577@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21578 */
21579 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21580 {
21581- unsigned char int3 = 0xcc;
21582+ const unsigned char int3 = 0xcc;
21583
21584 bp_int3_handler = handler;
21585 bp_int3_addr = (u8 *)addr + sizeof(int3);
21586diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21587index 6776027..972266c 100644
21588--- a/arch/x86/kernel/apic/apic.c
21589+++ b/arch/x86/kernel/apic/apic.c
21590@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21591 /*
21592 * Debug level, exported for io_apic.c
21593 */
21594-unsigned int apic_verbosity;
21595+int apic_verbosity;
21596
21597 int pic_mode;
21598
21599@@ -1989,7 +1989,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21600 apic_write(APIC_ESR, 0);
21601 v = apic_read(APIC_ESR);
21602 ack_APIC_irq();
21603- atomic_inc(&irq_err_count);
21604+ atomic_inc_unchecked(&irq_err_count);
21605
21606 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21607 smp_processor_id(), v);
21608diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21609index de918c4..32eed23 100644
21610--- a/arch/x86/kernel/apic/apic_flat_64.c
21611+++ b/arch/x86/kernel/apic/apic_flat_64.c
21612@@ -154,7 +154,7 @@ static int flat_probe(void)
21613 return 1;
21614 }
21615
21616-static struct apic apic_flat = {
21617+static struct apic apic_flat __read_only = {
21618 .name = "flat",
21619 .probe = flat_probe,
21620 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21621@@ -260,7 +260,7 @@ static int physflat_probe(void)
21622 return 0;
21623 }
21624
21625-static struct apic apic_physflat = {
21626+static struct apic apic_physflat __read_only = {
21627
21628 .name = "physical flat",
21629 .probe = physflat_probe,
21630diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21631index b205cdb..d8503ff 100644
21632--- a/arch/x86/kernel/apic/apic_noop.c
21633+++ b/arch/x86/kernel/apic/apic_noop.c
21634@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
21635 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21636 }
21637
21638-struct apic apic_noop = {
21639+struct apic apic_noop __read_only = {
21640 .name = "noop",
21641 .probe = noop_probe,
21642 .acpi_madt_oem_check = NULL,
21643diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21644index c4a8d63..fe893ac 100644
21645--- a/arch/x86/kernel/apic/bigsmp_32.c
21646+++ b/arch/x86/kernel/apic/bigsmp_32.c
21647@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
21648 return dmi_bigsmp;
21649 }
21650
21651-static struct apic apic_bigsmp = {
21652+static struct apic apic_bigsmp __read_only = {
21653
21654 .name = "bigsmp",
21655 .probe = probe_bigsmp,
21656diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21657index 337ce5a..c8d98b4 100644
21658--- a/arch/x86/kernel/apic/io_apic.c
21659+++ b/arch/x86/kernel/apic/io_apic.c
21660@@ -1230,7 +1230,7 @@ out:
21661 }
21662 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21663
21664-void lock_vector_lock(void)
21665+void lock_vector_lock(void) __acquires(vector_lock)
21666 {
21667 /* Used to the online set of cpus does not change
21668 * during assign_irq_vector.
21669@@ -1238,7 +1238,7 @@ void lock_vector_lock(void)
21670 raw_spin_lock(&vector_lock);
21671 }
21672
21673-void unlock_vector_lock(void)
21674+void unlock_vector_lock(void) __releases(vector_lock)
21675 {
21676 raw_spin_unlock(&vector_lock);
21677 }
21678@@ -2465,7 +2465,7 @@ static void ack_apic_edge(struct irq_data *data)
21679 ack_APIC_irq();
21680 }
21681
21682-atomic_t irq_mis_count;
21683+atomic_unchecked_t irq_mis_count;
21684
21685 #ifdef CONFIG_GENERIC_PENDING_IRQ
21686 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21687@@ -2606,7 +2606,7 @@ static void ack_apic_level(struct irq_data *data)
21688 * at the cpu.
21689 */
21690 if (!(v & (1 << (i & 0x1f)))) {
21691- atomic_inc(&irq_mis_count);
21692+ atomic_inc_unchecked(&irq_mis_count);
21693
21694 eoi_ioapic_irq(irq, cfg);
21695 }
21696diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21697index bda4886..f9c7195 100644
21698--- a/arch/x86/kernel/apic/probe_32.c
21699+++ b/arch/x86/kernel/apic/probe_32.c
21700@@ -72,7 +72,7 @@ static int probe_default(void)
21701 return 1;
21702 }
21703
21704-static struct apic apic_default = {
21705+static struct apic apic_default __read_only = {
21706
21707 .name = "default",
21708 .probe = probe_default,
21709diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21710index 6ce600f..cb44af8 100644
21711--- a/arch/x86/kernel/apic/x2apic_cluster.c
21712+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21713@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21714 return notifier_from_errno(err);
21715 }
21716
21717-static struct notifier_block __refdata x2apic_cpu_notifier = {
21718+static struct notifier_block x2apic_cpu_notifier = {
21719 .notifier_call = update_clusterinfo,
21720 };
21721
21722@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21723 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21724 }
21725
21726-static struct apic apic_x2apic_cluster = {
21727+static struct apic apic_x2apic_cluster __read_only = {
21728
21729 .name = "cluster x2apic",
21730 .probe = x2apic_cluster_probe,
21731diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21732index 6fae733..5ca17af 100644
21733--- a/arch/x86/kernel/apic/x2apic_phys.c
21734+++ b/arch/x86/kernel/apic/x2apic_phys.c
21735@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21736 return apic == &apic_x2apic_phys;
21737 }
21738
21739-static struct apic apic_x2apic_phys = {
21740+static struct apic apic_x2apic_phys __read_only = {
21741
21742 .name = "physical x2apic",
21743 .probe = x2apic_phys_probe,
21744diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21745index 004f017..8fbc8b5 100644
21746--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21747+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21748@@ -350,7 +350,7 @@ static int uv_probe(void)
21749 return apic == &apic_x2apic_uv_x;
21750 }
21751
21752-static struct apic __refdata apic_x2apic_uv_x = {
21753+static struct apic apic_x2apic_uv_x __read_only = {
21754
21755 .name = "UV large system",
21756 .probe = uv_probe,
21757diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21758index 5848744..56cb598 100644
21759--- a/arch/x86/kernel/apm_32.c
21760+++ b/arch/x86/kernel/apm_32.c
21761@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21762 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21763 * even though they are called in protected mode.
21764 */
21765-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21766+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21767 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21768
21769 static const char driver_version[] = "1.16ac"; /* no spaces */
21770@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21771 BUG_ON(cpu != 0);
21772 gdt = get_cpu_gdt_table(cpu);
21773 save_desc_40 = gdt[0x40 / 8];
21774+
21775+ pax_open_kernel();
21776 gdt[0x40 / 8] = bad_bios_desc;
21777+ pax_close_kernel();
21778
21779 apm_irq_save(flags);
21780 APM_DO_SAVE_SEGS;
21781@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21782 &call->esi);
21783 APM_DO_RESTORE_SEGS;
21784 apm_irq_restore(flags);
21785+
21786+ pax_open_kernel();
21787 gdt[0x40 / 8] = save_desc_40;
21788+ pax_close_kernel();
21789+
21790 put_cpu();
21791
21792 return call->eax & 0xff;
21793@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21794 BUG_ON(cpu != 0);
21795 gdt = get_cpu_gdt_table(cpu);
21796 save_desc_40 = gdt[0x40 / 8];
21797+
21798+ pax_open_kernel();
21799 gdt[0x40 / 8] = bad_bios_desc;
21800+ pax_close_kernel();
21801
21802 apm_irq_save(flags);
21803 APM_DO_SAVE_SEGS;
21804@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21805 &call->eax);
21806 APM_DO_RESTORE_SEGS;
21807 apm_irq_restore(flags);
21808+
21809+ pax_open_kernel();
21810 gdt[0x40 / 8] = save_desc_40;
21811+ pax_close_kernel();
21812+
21813 put_cpu();
21814 return error;
21815 }
21816@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21817 * code to that CPU.
21818 */
21819 gdt = get_cpu_gdt_table(0);
21820+
21821+ pax_open_kernel();
21822 set_desc_base(&gdt[APM_CS >> 3],
21823 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21824 set_desc_base(&gdt[APM_CS_16 >> 3],
21825 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21826 set_desc_base(&gdt[APM_DS >> 3],
21827 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21828+ pax_close_kernel();
21829
21830 proc_create("apm", 0, NULL, &apm_file_ops);
21831
21832diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21833index 9f6b934..cf5ffb3 100644
21834--- a/arch/x86/kernel/asm-offsets.c
21835+++ b/arch/x86/kernel/asm-offsets.c
21836@@ -32,6 +32,8 @@ void common(void) {
21837 OFFSET(TI_flags, thread_info, flags);
21838 OFFSET(TI_status, thread_info, status);
21839 OFFSET(TI_addr_limit, thread_info, addr_limit);
21840+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21841+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21842
21843 BLANK();
21844 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21845@@ -52,8 +54,26 @@ void common(void) {
21846 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21847 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21848 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21849+
21850+#ifdef CONFIG_PAX_KERNEXEC
21851+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21852 #endif
21853
21854+#ifdef CONFIG_PAX_MEMORY_UDEREF
21855+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21856+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21857+#ifdef CONFIG_X86_64
21858+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21859+#endif
21860+#endif
21861+
21862+#endif
21863+
21864+ BLANK();
21865+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21866+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21867+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21868+
21869 #ifdef CONFIG_XEN
21870 BLANK();
21871 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21872diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21873index e7c798b..2b2019b 100644
21874--- a/arch/x86/kernel/asm-offsets_64.c
21875+++ b/arch/x86/kernel/asm-offsets_64.c
21876@@ -77,6 +77,7 @@ int main(void)
21877 BLANK();
21878 #undef ENTRY
21879
21880+ DEFINE(TSS_size, sizeof(struct tss_struct));
21881 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21882 BLANK();
21883
21884diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21885index 7fd54f0..0691410 100644
21886--- a/arch/x86/kernel/cpu/Makefile
21887+++ b/arch/x86/kernel/cpu/Makefile
21888@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21889 CFLAGS_REMOVE_perf_event.o = -pg
21890 endif
21891
21892-# Make sure load_percpu_segment has no stackprotector
21893-nostackp := $(call cc-option, -fno-stack-protector)
21894-CFLAGS_common.o := $(nostackp)
21895-
21896 obj-y := intel_cacheinfo.o scattered.o topology.o
21897 obj-y += proc.o capflags.o powerflags.o common.o
21898 obj-y += rdrand.o
21899diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21900index 60e5497..8efbd2f 100644
21901--- a/arch/x86/kernel/cpu/amd.c
21902+++ b/arch/x86/kernel/cpu/amd.c
21903@@ -711,7 +711,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21904 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21905 {
21906 /* AMD errata T13 (order #21922) */
21907- if ((c->x86 == 6)) {
21908+ if (c->x86 == 6) {
21909 /* Duron Rev A0 */
21910 if (c->x86_model == 3 && c->x86_mask == 0)
21911 size = 64;
21912diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21913index e4ab2b4..d487ba5 100644
21914--- a/arch/x86/kernel/cpu/common.c
21915+++ b/arch/x86/kernel/cpu/common.c
21916@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21917
21918 static const struct cpu_dev *this_cpu = &default_cpu;
21919
21920-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21921-#ifdef CONFIG_X86_64
21922- /*
21923- * We need valid kernel segments for data and code in long mode too
21924- * IRET will check the segment types kkeil 2000/10/28
21925- * Also sysret mandates a special GDT layout
21926- *
21927- * TLS descriptors are currently at a different place compared to i386.
21928- * Hopefully nobody expects them at a fixed place (Wine?)
21929- */
21930- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21931- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21932- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21933- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21934- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21935- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21936-#else
21937- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21938- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21939- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21940- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21941- /*
21942- * Segments used for calling PnP BIOS have byte granularity.
21943- * They code segments and data segments have fixed 64k limits,
21944- * the transfer segment sizes are set at run time.
21945- */
21946- /* 32-bit code */
21947- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21948- /* 16-bit code */
21949- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21950- /* 16-bit data */
21951- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21952- /* 16-bit data */
21953- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21954- /* 16-bit data */
21955- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21956- /*
21957- * The APM segments have byte granularity and their bases
21958- * are set at run time. All have 64k limits.
21959- */
21960- /* 32-bit code */
21961- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21962- /* 16-bit code */
21963- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21964- /* data */
21965- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21966-
21967- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21968- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21969- GDT_STACK_CANARY_INIT
21970-#endif
21971-} };
21972-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21973-
21974 static int __init x86_xsave_setup(char *s)
21975 {
21976 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
21977@@ -303,6 +249,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21978 }
21979 }
21980
21981+#ifdef CONFIG_X86_64
21982+static __init int setup_disable_pcid(char *arg)
21983+{
21984+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21985+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21986+
21987+#ifdef CONFIG_PAX_MEMORY_UDEREF
21988+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21989+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21990+#endif
21991+
21992+ return 1;
21993+}
21994+__setup("nopcid", setup_disable_pcid);
21995+
21996+static void setup_pcid(struct cpuinfo_x86 *c)
21997+{
21998+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21999+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
22000+
22001+#ifdef CONFIG_PAX_MEMORY_UDEREF
22002+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
22003+ pax_open_kernel();
22004+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
22005+ pax_close_kernel();
22006+ printk("PAX: slow and weak UDEREF enabled\n");
22007+ } else
22008+ printk("PAX: UDEREF disabled\n");
22009+#endif
22010+
22011+ return;
22012+ }
22013+
22014+ printk("PAX: PCID detected\n");
22015+ set_in_cr4(X86_CR4_PCIDE);
22016+
22017+#ifdef CONFIG_PAX_MEMORY_UDEREF
22018+ pax_open_kernel();
22019+ clone_pgd_mask = ~(pgdval_t)0UL;
22020+ pax_close_kernel();
22021+ if (pax_user_shadow_base)
22022+ printk("PAX: weak UDEREF enabled\n");
22023+ else {
22024+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
22025+ printk("PAX: strong UDEREF enabled\n");
22026+ }
22027+#endif
22028+
22029+ if (cpu_has(c, X86_FEATURE_INVPCID))
22030+ printk("PAX: INVPCID detected\n");
22031+}
22032+#endif
22033+
22034 /*
22035 * Some CPU features depend on higher CPUID levels, which may not always
22036 * be available due to CPUID level capping or broken virtualization
22037@@ -403,7 +402,7 @@ void switch_to_new_gdt(int cpu)
22038 {
22039 struct desc_ptr gdt_descr;
22040
22041- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
22042+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22043 gdt_descr.size = GDT_SIZE - 1;
22044 load_gdt(&gdt_descr);
22045 /* Reload the per-cpu base */
22046@@ -893,6 +892,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22047 setup_smep(c);
22048 setup_smap(c);
22049
22050+#ifdef CONFIG_X86_64
22051+ setup_pcid(c);
22052+#endif
22053+
22054 /*
22055 * The vendor-specific functions might have changed features.
22056 * Now we do "generic changes."
22057@@ -901,6 +904,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22058 /* Filter out anything that depends on CPUID levels we don't have */
22059 filter_cpuid_features(c, true);
22060
22061+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22062+ setup_clear_cpu_cap(X86_FEATURE_SEP);
22063+#endif
22064+
22065 /* If the model name is still unset, do table lookup. */
22066 if (!c->x86_model_id[0]) {
22067 const char *p;
22068@@ -981,7 +988,7 @@ static void syscall32_cpu_init(void)
22069 void enable_sep_cpu(void)
22070 {
22071 int cpu = get_cpu();
22072- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22073+ struct tss_struct *tss = init_tss + cpu;
22074
22075 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22076 put_cpu();
22077@@ -1121,14 +1128,16 @@ static __init int setup_disablecpuid(char *arg)
22078 }
22079 __setup("clearcpuid=", setup_disablecpuid);
22080
22081+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
22082+EXPORT_PER_CPU_SYMBOL(current_tinfo);
22083+
22084 DEFINE_PER_CPU(unsigned long, kernel_stack) =
22085- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
22086+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
22087 EXPORT_PER_CPU_SYMBOL(kernel_stack);
22088
22089 #ifdef CONFIG_X86_64
22090-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22091-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
22092- (unsigned long) debug_idt_table };
22093+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22094+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
22095
22096 DEFINE_PER_CPU_FIRST(union irq_stack_union,
22097 irq_stack_union) __aligned(PAGE_SIZE) __visible;
22098@@ -1291,7 +1300,7 @@ void cpu_init(void)
22099 load_ucode_ap();
22100
22101 cpu = stack_smp_processor_id();
22102- t = &per_cpu(init_tss, cpu);
22103+ t = init_tss + cpu;
22104 oist = &per_cpu(orig_ist, cpu);
22105
22106 #ifdef CONFIG_NUMA
22107@@ -1326,7 +1335,6 @@ void cpu_init(void)
22108 wrmsrl(MSR_KERNEL_GS_BASE, 0);
22109 barrier();
22110
22111- x86_configure_nx();
22112 enable_x2apic();
22113
22114 /*
22115@@ -1378,7 +1386,7 @@ void cpu_init(void)
22116 {
22117 int cpu = smp_processor_id();
22118 struct task_struct *curr = current;
22119- struct tss_struct *t = &per_cpu(init_tss, cpu);
22120+ struct tss_struct *t = init_tss + cpu;
22121 struct thread_struct *thread = &curr->thread;
22122
22123 show_ucode_info_early();
22124diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
22125index c703507..28535e3 100644
22126--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
22127+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
22128@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
22129 };
22130
22131 #ifdef CONFIG_AMD_NB
22132+static struct attribute *default_attrs_amd_nb[] = {
22133+ &type.attr,
22134+ &level.attr,
22135+ &coherency_line_size.attr,
22136+ &physical_line_partition.attr,
22137+ &ways_of_associativity.attr,
22138+ &number_of_sets.attr,
22139+ &size.attr,
22140+ &shared_cpu_map.attr,
22141+ &shared_cpu_list.attr,
22142+ NULL,
22143+ NULL,
22144+ NULL,
22145+ NULL
22146+};
22147+
22148 static struct attribute **amd_l3_attrs(void)
22149 {
22150 static struct attribute **attrs;
22151@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
22152
22153 n = ARRAY_SIZE(default_attrs);
22154
22155- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
22156- n += 2;
22157-
22158- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
22159- n += 1;
22160-
22161- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
22162- if (attrs == NULL)
22163- return attrs = default_attrs;
22164-
22165- for (n = 0; default_attrs[n]; n++)
22166- attrs[n] = default_attrs[n];
22167+ attrs = default_attrs_amd_nb;
22168
22169 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
22170 attrs[n++] = &cache_disable_0.attr;
22171@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
22172 .default_attrs = default_attrs,
22173 };
22174
22175+#ifdef CONFIG_AMD_NB
22176+static struct kobj_type ktype_cache_amd_nb = {
22177+ .sysfs_ops = &sysfs_ops,
22178+ .default_attrs = default_attrs_amd_nb,
22179+};
22180+#endif
22181+
22182 static struct kobj_type ktype_percpu_entry = {
22183 .sysfs_ops = &sysfs_ops,
22184 };
22185@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
22186 return retval;
22187 }
22188
22189+#ifdef CONFIG_AMD_NB
22190+ amd_l3_attrs();
22191+#endif
22192+
22193 for (i = 0; i < num_cache_leaves; i++) {
22194+ struct kobj_type *ktype;
22195+
22196 this_object = INDEX_KOBJECT_PTR(cpu, i);
22197 this_object->cpu = cpu;
22198 this_object->index = i;
22199
22200 this_leaf = CPUID4_INFO_IDX(cpu, i);
22201
22202- ktype_cache.default_attrs = default_attrs;
22203+ ktype = &ktype_cache;
22204 #ifdef CONFIG_AMD_NB
22205 if (this_leaf->base.nb)
22206- ktype_cache.default_attrs = amd_l3_attrs();
22207+ ktype = &ktype_cache_amd_nb;
22208 #endif
22209 retval = kobject_init_and_add(&(this_object->kobj),
22210- &ktype_cache,
22211+ ktype,
22212 per_cpu(ici_cache_kobject, cpu),
22213 "index%1lu", i);
22214 if (unlikely(retval)) {
22215diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
22216index bd9ccda..38314e7 100644
22217--- a/arch/x86/kernel/cpu/mcheck/mce.c
22218+++ b/arch/x86/kernel/cpu/mcheck/mce.c
22219@@ -45,6 +45,7 @@
22220 #include <asm/processor.h>
22221 #include <asm/mce.h>
22222 #include <asm/msr.h>
22223+#include <asm/local.h>
22224
22225 #include "mce-internal.h"
22226
22227@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
22228 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
22229 m->cs, m->ip);
22230
22231- if (m->cs == __KERNEL_CS)
22232+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
22233 print_symbol("{%s}", m->ip);
22234 pr_cont("\n");
22235 }
22236@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
22237
22238 #define PANIC_TIMEOUT 5 /* 5 seconds */
22239
22240-static atomic_t mce_paniced;
22241+static atomic_unchecked_t mce_paniced;
22242
22243 static int fake_panic;
22244-static atomic_t mce_fake_paniced;
22245+static atomic_unchecked_t mce_fake_paniced;
22246
22247 /* Panic in progress. Enable interrupts and wait for final IPI */
22248 static void wait_for_panic(void)
22249@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22250 /*
22251 * Make sure only one CPU runs in machine check panic
22252 */
22253- if (atomic_inc_return(&mce_paniced) > 1)
22254+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
22255 wait_for_panic();
22256 barrier();
22257
22258@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22259 console_verbose();
22260 } else {
22261 /* Don't log too much for fake panic */
22262- if (atomic_inc_return(&mce_fake_paniced) > 1)
22263+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
22264 return;
22265 }
22266 /* First print corrected ones that are still unlogged */
22267@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22268 if (!fake_panic) {
22269 if (panic_timeout == 0)
22270 panic_timeout = mca_cfg.panic_timeout;
22271- panic(msg);
22272+ panic("%s", msg);
22273 } else
22274 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
22275 }
22276@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
22277 * might have been modified by someone else.
22278 */
22279 rmb();
22280- if (atomic_read(&mce_paniced))
22281+ if (atomic_read_unchecked(&mce_paniced))
22282 wait_for_panic();
22283 if (!mca_cfg.monarch_timeout)
22284 goto out;
22285@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
22286 }
22287
22288 /* Call the installed machine check handler for this CPU setup. */
22289-void (*machine_check_vector)(struct pt_regs *, long error_code) =
22290+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
22291 unexpected_machine_check;
22292
22293 /*
22294@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22295 return;
22296 }
22297
22298+ pax_open_kernel();
22299 machine_check_vector = do_machine_check;
22300+ pax_close_kernel();
22301
22302 __mcheck_cpu_init_generic();
22303 __mcheck_cpu_init_vendor(c);
22304@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22305 */
22306
22307 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
22308-static int mce_chrdev_open_count; /* #times opened */
22309+static local_t mce_chrdev_open_count; /* #times opened */
22310 static int mce_chrdev_open_exclu; /* already open exclusive? */
22311
22312 static int mce_chrdev_open(struct inode *inode, struct file *file)
22313@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22314 spin_lock(&mce_chrdev_state_lock);
22315
22316 if (mce_chrdev_open_exclu ||
22317- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
22318+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
22319 spin_unlock(&mce_chrdev_state_lock);
22320
22321 return -EBUSY;
22322@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22323
22324 if (file->f_flags & O_EXCL)
22325 mce_chrdev_open_exclu = 1;
22326- mce_chrdev_open_count++;
22327+ local_inc(&mce_chrdev_open_count);
22328
22329 spin_unlock(&mce_chrdev_state_lock);
22330
22331@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
22332 {
22333 spin_lock(&mce_chrdev_state_lock);
22334
22335- mce_chrdev_open_count--;
22336+ local_dec(&mce_chrdev_open_count);
22337 mce_chrdev_open_exclu = 0;
22338
22339 spin_unlock(&mce_chrdev_state_lock);
22340@@ -2413,7 +2416,7 @@ static __init void mce_init_banks(void)
22341
22342 for (i = 0; i < mca_cfg.banks; i++) {
22343 struct mce_bank *b = &mce_banks[i];
22344- struct device_attribute *a = &b->attr;
22345+ device_attribute_no_const *a = &b->attr;
22346
22347 sysfs_attr_init(&a->attr);
22348 a->attr.name = b->attrname;
22349@@ -2520,7 +2523,7 @@ struct dentry *mce_get_debugfs_dir(void)
22350 static void mce_reset(void)
22351 {
22352 cpu_missing = 0;
22353- atomic_set(&mce_fake_paniced, 0);
22354+ atomic_set_unchecked(&mce_fake_paniced, 0);
22355 atomic_set(&mce_executing, 0);
22356 atomic_set(&mce_callin, 0);
22357 atomic_set(&global_nwo, 0);
22358diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
22359index a304298..49b6d06 100644
22360--- a/arch/x86/kernel/cpu/mcheck/p5.c
22361+++ b/arch/x86/kernel/cpu/mcheck/p5.c
22362@@ -10,6 +10,7 @@
22363 #include <asm/processor.h>
22364 #include <asm/mce.h>
22365 #include <asm/msr.h>
22366+#include <asm/pgtable.h>
22367
22368 /* By default disabled */
22369 int mce_p5_enabled __read_mostly;
22370@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
22371 if (!cpu_has(c, X86_FEATURE_MCE))
22372 return;
22373
22374+ pax_open_kernel();
22375 machine_check_vector = pentium_machine_check;
22376+ pax_close_kernel();
22377 /* Make sure the vector pointer is visible before we enable MCEs: */
22378 wmb();
22379
22380diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
22381index 7dc5564..1273569 100644
22382--- a/arch/x86/kernel/cpu/mcheck/winchip.c
22383+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
22384@@ -9,6 +9,7 @@
22385 #include <asm/processor.h>
22386 #include <asm/mce.h>
22387 #include <asm/msr.h>
22388+#include <asm/pgtable.h>
22389
22390 /* Machine check handler for WinChip C6: */
22391 static void winchip_machine_check(struct pt_regs *regs, long error_code)
22392@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
22393 {
22394 u32 lo, hi;
22395
22396+ pax_open_kernel();
22397 machine_check_vector = winchip_machine_check;
22398+ pax_close_kernel();
22399 /* Make sure the vector pointer is visible before we enable MCEs: */
22400 wmb();
22401
22402diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
22403index dd9d619..86e1d81 100644
22404--- a/arch/x86/kernel/cpu/microcode/core.c
22405+++ b/arch/x86/kernel/cpu/microcode/core.c
22406@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
22407 return NOTIFY_OK;
22408 }
22409
22410-static struct notifier_block __refdata mc_cpu_notifier = {
22411+static struct notifier_block mc_cpu_notifier = {
22412 .notifier_call = mc_cpu_callback,
22413 };
22414
22415diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
22416index a276fa7..e66810f 100644
22417--- a/arch/x86/kernel/cpu/microcode/intel.c
22418+++ b/arch/x86/kernel/cpu/microcode/intel.c
22419@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
22420
22421 static int get_ucode_user(void *to, const void *from, size_t n)
22422 {
22423- return copy_from_user(to, from, n);
22424+ return copy_from_user(to, (const void __force_user *)from, n);
22425 }
22426
22427 static enum ucode_state
22428 request_microcode_user(int cpu, const void __user *buf, size_t size)
22429 {
22430- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
22431+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
22432 }
22433
22434 static void microcode_fini_cpu(int cpu)
22435diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
22436index f961de9..8a9d332 100644
22437--- a/arch/x86/kernel/cpu/mtrr/main.c
22438+++ b/arch/x86/kernel/cpu/mtrr/main.c
22439@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
22440 u64 size_or_mask, size_and_mask;
22441 static bool mtrr_aps_delayed_init;
22442
22443-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
22444+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
22445
22446 const struct mtrr_ops *mtrr_if;
22447
22448diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
22449index df5e41f..816c719 100644
22450--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
22451+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
22452@@ -25,7 +25,7 @@ struct mtrr_ops {
22453 int (*validate_add_page)(unsigned long base, unsigned long size,
22454 unsigned int type);
22455 int (*have_wrcomb)(void);
22456-};
22457+} __do_const;
22458
22459 extern int generic_get_free_region(unsigned long base, unsigned long size,
22460 int replace_reg);
22461diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
22462index 2879ecd..bb8c80b 100644
22463--- a/arch/x86/kernel/cpu/perf_event.c
22464+++ b/arch/x86/kernel/cpu/perf_event.c
22465@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
22466
22467 }
22468
22469-static struct attribute_group x86_pmu_format_group = {
22470+static attribute_group_no_const x86_pmu_format_group = {
22471 .name = "format",
22472 .attrs = NULL,
22473 };
22474@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
22475 NULL,
22476 };
22477
22478-static struct attribute_group x86_pmu_events_group = {
22479+static attribute_group_no_const x86_pmu_events_group = {
22480 .name = "events",
22481 .attrs = events_attr,
22482 };
22483@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
22484 if (idx > GDT_ENTRIES)
22485 return 0;
22486
22487- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
22488+ desc = get_cpu_gdt_table(smp_processor_id());
22489 }
22490
22491 return get_desc_base(desc + idx);
22492@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
22493 break;
22494
22495 perf_callchain_store(entry, frame.return_address);
22496- fp = frame.next_frame;
22497+ fp = (const void __force_user *)frame.next_frame;
22498 }
22499 }
22500
22501diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22502index 639d128..e92d7e5 100644
22503--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22504+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22505@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
22506 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
22507 {
22508 struct attribute **attrs;
22509- struct attribute_group *attr_group;
22510+ attribute_group_no_const *attr_group;
22511 int i = 0, j;
22512
22513 while (amd_iommu_v2_event_descs[i].attr.attr.name)
22514diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
22515index 2502d0d..e5cc05c 100644
22516--- a/arch/x86/kernel/cpu/perf_event_intel.c
22517+++ b/arch/x86/kernel/cpu/perf_event_intel.c
22518@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22519 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22520
22521 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22522- u64 capabilities;
22523+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22524
22525- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22526- x86_pmu.intel_cap.capabilities = capabilities;
22527+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22528+ x86_pmu.intel_cap.capabilities = capabilities;
22529 }
22530
22531 intel_ds_init();
22532diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22533index 619f769..d510008 100644
22534--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22535+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22536@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22537 NULL,
22538 };
22539
22540-static struct attribute_group rapl_pmu_events_group = {
22541+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22542 .name = "events",
22543 .attrs = NULL, /* patched at runtime */
22544 };
22545diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22546index 0939f86..69730af 100644
22547--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22548+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22549@@ -3691,7 +3691,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22550 static int __init uncore_type_init(struct intel_uncore_type *type)
22551 {
22552 struct intel_uncore_pmu *pmus;
22553- struct attribute_group *attr_group;
22554+ attribute_group_no_const *attr_group;
22555 struct attribute **attrs;
22556 int i, j;
22557
22558diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22559index 90236f0..54cb20d 100644
22560--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22561+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22562@@ -503,7 +503,7 @@ struct intel_uncore_box {
22563 struct uncore_event_desc {
22564 struct kobj_attribute attr;
22565 const char *config;
22566-};
22567+} __do_const;
22568
22569 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22570 { \
22571diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22572index 3225ae6c..ee3c6db 100644
22573--- a/arch/x86/kernel/cpuid.c
22574+++ b/arch/x86/kernel/cpuid.c
22575@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22576 return notifier_from_errno(err);
22577 }
22578
22579-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22580+static struct notifier_block cpuid_class_cpu_notifier =
22581 {
22582 .notifier_call = cpuid_class_cpu_callback,
22583 };
22584diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22585index a618fcd..200e95b 100644
22586--- a/arch/x86/kernel/crash.c
22587+++ b/arch/x86/kernel/crash.c
22588@@ -104,7 +104,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22589 #ifdef CONFIG_X86_32
22590 struct pt_regs fixed_regs;
22591
22592- if (!user_mode_vm(regs)) {
22593+ if (!user_mode(regs)) {
22594 crash_fixup_ss_esp(&fixed_regs, regs);
22595 regs = &fixed_regs;
22596 }
22597diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22598index afa64ad..dce67dd 100644
22599--- a/arch/x86/kernel/crash_dump_64.c
22600+++ b/arch/x86/kernel/crash_dump_64.c
22601@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22602 return -ENOMEM;
22603
22604 if (userbuf) {
22605- if (copy_to_user(buf, vaddr + offset, csize)) {
22606+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22607 iounmap(vaddr);
22608 return -EFAULT;
22609 }
22610diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22611index f6dfd93..892ade4 100644
22612--- a/arch/x86/kernel/doublefault.c
22613+++ b/arch/x86/kernel/doublefault.c
22614@@ -12,7 +12,7 @@
22615
22616 #define DOUBLEFAULT_STACKSIZE (1024)
22617 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22618-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22619+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22620
22621 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22622
22623@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22624 unsigned long gdt, tss;
22625
22626 native_store_gdt(&gdt_desc);
22627- gdt = gdt_desc.address;
22628+ gdt = (unsigned long)gdt_desc.address;
22629
22630 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22631
22632@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22633 /* 0x2 bit is always set */
22634 .flags = X86_EFLAGS_SF | 0x2,
22635 .sp = STACK_START,
22636- .es = __USER_DS,
22637+ .es = __KERNEL_DS,
22638 .cs = __KERNEL_CS,
22639 .ss = __KERNEL_DS,
22640- .ds = __USER_DS,
22641+ .ds = __KERNEL_DS,
22642 .fs = __KERNEL_PERCPU,
22643
22644 .__cr3 = __pa_nodebug(swapper_pg_dir),
22645diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22646index b74ebc7..2c95874 100644
22647--- a/arch/x86/kernel/dumpstack.c
22648+++ b/arch/x86/kernel/dumpstack.c
22649@@ -2,6 +2,9 @@
22650 * Copyright (C) 1991, 1992 Linus Torvalds
22651 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22652 */
22653+#ifdef CONFIG_GRKERNSEC_HIDESYM
22654+#define __INCLUDED_BY_HIDESYM 1
22655+#endif
22656 #include <linux/kallsyms.h>
22657 #include <linux/kprobes.h>
22658 #include <linux/uaccess.h>
22659@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22660
22661 void printk_address(unsigned long address)
22662 {
22663- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22664+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22665 }
22666
22667 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22668 static void
22669 print_ftrace_graph_addr(unsigned long addr, void *data,
22670 const struct stacktrace_ops *ops,
22671- struct thread_info *tinfo, int *graph)
22672+ struct task_struct *task, int *graph)
22673 {
22674- struct task_struct *task;
22675 unsigned long ret_addr;
22676 int index;
22677
22678 if (addr != (unsigned long)return_to_handler)
22679 return;
22680
22681- task = tinfo->task;
22682 index = task->curr_ret_stack;
22683
22684 if (!task->ret_stack || index < *graph)
22685@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22686 static inline void
22687 print_ftrace_graph_addr(unsigned long addr, void *data,
22688 const struct stacktrace_ops *ops,
22689- struct thread_info *tinfo, int *graph)
22690+ struct task_struct *task, int *graph)
22691 { }
22692 #endif
22693
22694@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22695 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22696 */
22697
22698-static inline int valid_stack_ptr(struct thread_info *tinfo,
22699- void *p, unsigned int size, void *end)
22700+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22701 {
22702- void *t = tinfo;
22703 if (end) {
22704 if (p < end && p >= (end-THREAD_SIZE))
22705 return 1;
22706@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22707 }
22708
22709 unsigned long
22710-print_context_stack(struct thread_info *tinfo,
22711+print_context_stack(struct task_struct *task, void *stack_start,
22712 unsigned long *stack, unsigned long bp,
22713 const struct stacktrace_ops *ops, void *data,
22714 unsigned long *end, int *graph)
22715 {
22716 struct stack_frame *frame = (struct stack_frame *)bp;
22717
22718- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22719+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22720 unsigned long addr;
22721
22722 addr = *stack;
22723@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22724 } else {
22725 ops->address(data, addr, 0);
22726 }
22727- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22728+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22729 }
22730 stack++;
22731 }
22732@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22733 EXPORT_SYMBOL_GPL(print_context_stack);
22734
22735 unsigned long
22736-print_context_stack_bp(struct thread_info *tinfo,
22737+print_context_stack_bp(struct task_struct *task, void *stack_start,
22738 unsigned long *stack, unsigned long bp,
22739 const struct stacktrace_ops *ops, void *data,
22740 unsigned long *end, int *graph)
22741@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22742 struct stack_frame *frame = (struct stack_frame *)bp;
22743 unsigned long *ret_addr = &frame->return_address;
22744
22745- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22746+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22747 unsigned long addr = *ret_addr;
22748
22749 if (!__kernel_text_address(addr))
22750@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22751 ops->address(data, addr, 1);
22752 frame = frame->next_frame;
22753 ret_addr = &frame->return_address;
22754- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22755+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22756 }
22757
22758 return (unsigned long)frame;
22759@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22760 static void print_trace_address(void *data, unsigned long addr, int reliable)
22761 {
22762 touch_nmi_watchdog();
22763- printk(data);
22764+ printk("%s", (char *)data);
22765 printk_stack_address(addr, reliable);
22766 }
22767
22768@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22769 EXPORT_SYMBOL_GPL(oops_begin);
22770 NOKPROBE_SYMBOL(oops_begin);
22771
22772+extern void gr_handle_kernel_exploit(void);
22773+
22774 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22775 {
22776 if (regs && kexec_should_crash(current))
22777@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22778 panic("Fatal exception in interrupt");
22779 if (panic_on_oops)
22780 panic("Fatal exception");
22781- do_exit(signr);
22782+
22783+ gr_handle_kernel_exploit();
22784+
22785+ do_group_exit(signr);
22786 }
22787 NOKPROBE_SYMBOL(oops_end);
22788
22789@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22790 print_modules();
22791 show_regs(regs);
22792 #ifdef CONFIG_X86_32
22793- if (user_mode_vm(regs)) {
22794+ if (user_mode(regs)) {
22795 sp = regs->sp;
22796 ss = regs->ss & 0xffff;
22797 } else {
22798@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22799 unsigned long flags = oops_begin();
22800 int sig = SIGSEGV;
22801
22802- if (!user_mode_vm(regs))
22803+ if (!user_mode(regs))
22804 report_bug(regs->ip, regs);
22805
22806 if (__die(str, regs, err))
22807diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22808index 5abd4cd..c65733b 100644
22809--- a/arch/x86/kernel/dumpstack_32.c
22810+++ b/arch/x86/kernel/dumpstack_32.c
22811@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22812 bp = stack_frame(task, regs);
22813
22814 for (;;) {
22815- struct thread_info *context;
22816+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22817 void *end_stack;
22818
22819 end_stack = is_hardirq_stack(stack, cpu);
22820 if (!end_stack)
22821 end_stack = is_softirq_stack(stack, cpu);
22822
22823- context = task_thread_info(task);
22824- bp = ops->walk_stack(context, stack, bp, ops, data,
22825+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22826 end_stack, &graph);
22827
22828 /* Stop if not on irq stack */
22829@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22830 int i;
22831
22832 show_regs_print_info(KERN_EMERG);
22833- __show_regs(regs, !user_mode_vm(regs));
22834+ __show_regs(regs, !user_mode(regs));
22835
22836 /*
22837 * When in-kernel, we also print out the stack and code at the
22838 * time of the fault..
22839 */
22840- if (!user_mode_vm(regs)) {
22841+ if (!user_mode(regs)) {
22842 unsigned int code_prologue = code_bytes * 43 / 64;
22843 unsigned int code_len = code_bytes;
22844 unsigned char c;
22845 u8 *ip;
22846+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22847
22848 pr_emerg("Stack:\n");
22849 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22850
22851 pr_emerg("Code:");
22852
22853- ip = (u8 *)regs->ip - code_prologue;
22854+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22855 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22856 /* try starting at IP */
22857- ip = (u8 *)regs->ip;
22858+ ip = (u8 *)regs->ip + cs_base;
22859 code_len = code_len - code_prologue + 1;
22860 }
22861 for (i = 0; i < code_len; i++, ip++) {
22862@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22863 pr_cont(" Bad EIP value.");
22864 break;
22865 }
22866- if (ip == (u8 *)regs->ip)
22867+ if (ip == (u8 *)regs->ip + cs_base)
22868 pr_cont(" <%02x>", c);
22869 else
22870 pr_cont(" %02x", c);
22871@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22872 {
22873 unsigned short ud2;
22874
22875+ ip = ktla_ktva(ip);
22876 if (ip < PAGE_OFFSET)
22877 return 0;
22878 if (probe_kernel_address((unsigned short *)ip, ud2))
22879@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22880
22881 return ud2 == 0x0b0f;
22882 }
22883+
22884+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22885+void pax_check_alloca(unsigned long size)
22886+{
22887+ unsigned long sp = (unsigned long)&sp, stack_left;
22888+
22889+ /* all kernel stacks are of the same size */
22890+ stack_left = sp & (THREAD_SIZE - 1);
22891+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22892+}
22893+EXPORT_SYMBOL(pax_check_alloca);
22894+#endif
22895diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22896index 1abcb50..6c8d702 100644
22897--- a/arch/x86/kernel/dumpstack_64.c
22898+++ b/arch/x86/kernel/dumpstack_64.c
22899@@ -154,12 +154,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22900 const struct stacktrace_ops *ops, void *data)
22901 {
22902 const unsigned cpu = get_cpu();
22903- struct thread_info *tinfo;
22904 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22905 unsigned long dummy;
22906 unsigned used = 0;
22907 int graph = 0;
22908 int done = 0;
22909+ void *stack_start;
22910
22911 if (!task)
22912 task = current;
22913@@ -180,7 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22914 * current stack address. If the stacks consist of nested
22915 * exceptions
22916 */
22917- tinfo = task_thread_info(task);
22918 while (!done) {
22919 unsigned long *stack_end;
22920 enum stack_type stype;
22921@@ -203,7 +202,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22922 if (ops->stack(data, id) < 0)
22923 break;
22924
22925- bp = ops->walk_stack(tinfo, stack, bp, ops,
22926+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22927 data, stack_end, &graph);
22928 ops->stack(data, "<EOE>");
22929 /*
22930@@ -211,6 +210,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22931 * second-to-last pointer (index -2 to end) in the
22932 * exception stack:
22933 */
22934+ if ((u16)stack_end[-1] != __KERNEL_DS)
22935+ goto out;
22936 stack = (unsigned long *) stack_end[-2];
22937 done = 0;
22938 break;
22939@@ -219,7 +220,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22940
22941 if (ops->stack(data, "IRQ") < 0)
22942 break;
22943- bp = ops->walk_stack(tinfo, stack, bp,
22944+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22945 ops, data, stack_end, &graph);
22946 /*
22947 * We link to the next stack (which would be
22948@@ -241,7 +242,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22949 /*
22950 * This handles the process stack:
22951 */
22952- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22953+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22954+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22955+out:
22956 put_cpu();
22957 }
22958 EXPORT_SYMBOL(dump_trace);
22959@@ -350,3 +353,50 @@ int is_valid_bugaddr(unsigned long ip)
22960
22961 return ud2 == 0x0b0f;
22962 }
22963+
22964+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22965+void pax_check_alloca(unsigned long size)
22966+{
22967+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22968+ unsigned cpu, used;
22969+ char *id;
22970+
22971+ /* check the process stack first */
22972+ stack_start = (unsigned long)task_stack_page(current);
22973+ stack_end = stack_start + THREAD_SIZE;
22974+ if (likely(stack_start <= sp && sp < stack_end)) {
22975+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22976+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22977+ return;
22978+ }
22979+
22980+ cpu = get_cpu();
22981+
22982+ /* check the irq stacks */
22983+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22984+ stack_start = stack_end - IRQ_STACK_SIZE;
22985+ if (stack_start <= sp && sp < stack_end) {
22986+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22987+ put_cpu();
22988+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22989+ return;
22990+ }
22991+
22992+ /* check the exception stacks */
22993+ used = 0;
22994+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22995+ stack_start = stack_end - EXCEPTION_STKSZ;
22996+ if (stack_end && stack_start <= sp && sp < stack_end) {
22997+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22998+ put_cpu();
22999+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
23000+ return;
23001+ }
23002+
23003+ put_cpu();
23004+
23005+ /* unknown stack */
23006+ BUG();
23007+}
23008+EXPORT_SYMBOL(pax_check_alloca);
23009+#endif
23010diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
23011index 988c00a..4f673b6 100644
23012--- a/arch/x86/kernel/e820.c
23013+++ b/arch/x86/kernel/e820.c
23014@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
23015
23016 static void early_panic(char *msg)
23017 {
23018- early_printk(msg);
23019- panic(msg);
23020+ early_printk("%s", msg);
23021+ panic("%s", msg);
23022 }
23023
23024 static int userdef __initdata;
23025diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
23026index 01d1c18..8073693 100644
23027--- a/arch/x86/kernel/early_printk.c
23028+++ b/arch/x86/kernel/early_printk.c
23029@@ -7,6 +7,7 @@
23030 #include <linux/pci_regs.h>
23031 #include <linux/pci_ids.h>
23032 #include <linux/errno.h>
23033+#include <linux/sched.h>
23034 #include <asm/io.h>
23035 #include <asm/processor.h>
23036 #include <asm/fcntl.h>
23037diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
23038index 4b0e1df..884b67e 100644
23039--- a/arch/x86/kernel/entry_32.S
23040+++ b/arch/x86/kernel/entry_32.S
23041@@ -177,13 +177,153 @@
23042 /*CFI_REL_OFFSET gs, PT_GS*/
23043 .endm
23044 .macro SET_KERNEL_GS reg
23045+
23046+#ifdef CONFIG_CC_STACKPROTECTOR
23047 movl $(__KERNEL_STACK_CANARY), \reg
23048+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23049+ movl $(__USER_DS), \reg
23050+#else
23051+ xorl \reg, \reg
23052+#endif
23053+
23054 movl \reg, %gs
23055 .endm
23056
23057 #endif /* CONFIG_X86_32_LAZY_GS */
23058
23059-.macro SAVE_ALL
23060+.macro pax_enter_kernel
23061+#ifdef CONFIG_PAX_KERNEXEC
23062+ call pax_enter_kernel
23063+#endif
23064+.endm
23065+
23066+.macro pax_exit_kernel
23067+#ifdef CONFIG_PAX_KERNEXEC
23068+ call pax_exit_kernel
23069+#endif
23070+.endm
23071+
23072+#ifdef CONFIG_PAX_KERNEXEC
23073+ENTRY(pax_enter_kernel)
23074+#ifdef CONFIG_PARAVIRT
23075+ pushl %eax
23076+ pushl %ecx
23077+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
23078+ mov %eax, %esi
23079+#else
23080+ mov %cr0, %esi
23081+#endif
23082+ bts $16, %esi
23083+ jnc 1f
23084+ mov %cs, %esi
23085+ cmp $__KERNEL_CS, %esi
23086+ jz 3f
23087+ ljmp $__KERNEL_CS, $3f
23088+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
23089+2:
23090+#ifdef CONFIG_PARAVIRT
23091+ mov %esi, %eax
23092+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
23093+#else
23094+ mov %esi, %cr0
23095+#endif
23096+3:
23097+#ifdef CONFIG_PARAVIRT
23098+ popl %ecx
23099+ popl %eax
23100+#endif
23101+ ret
23102+ENDPROC(pax_enter_kernel)
23103+
23104+ENTRY(pax_exit_kernel)
23105+#ifdef CONFIG_PARAVIRT
23106+ pushl %eax
23107+ pushl %ecx
23108+#endif
23109+ mov %cs, %esi
23110+ cmp $__KERNEXEC_KERNEL_CS, %esi
23111+ jnz 2f
23112+#ifdef CONFIG_PARAVIRT
23113+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
23114+ mov %eax, %esi
23115+#else
23116+ mov %cr0, %esi
23117+#endif
23118+ btr $16, %esi
23119+ ljmp $__KERNEL_CS, $1f
23120+1:
23121+#ifdef CONFIG_PARAVIRT
23122+ mov %esi, %eax
23123+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
23124+#else
23125+ mov %esi, %cr0
23126+#endif
23127+2:
23128+#ifdef CONFIG_PARAVIRT
23129+ popl %ecx
23130+ popl %eax
23131+#endif
23132+ ret
23133+ENDPROC(pax_exit_kernel)
23134+#endif
23135+
23136+ .macro pax_erase_kstack
23137+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23138+ call pax_erase_kstack
23139+#endif
23140+ .endm
23141+
23142+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23143+/*
23144+ * ebp: thread_info
23145+ */
23146+ENTRY(pax_erase_kstack)
23147+ pushl %edi
23148+ pushl %ecx
23149+ pushl %eax
23150+
23151+ mov TI_lowest_stack(%ebp), %edi
23152+ mov $-0xBEEF, %eax
23153+ std
23154+
23155+1: mov %edi, %ecx
23156+ and $THREAD_SIZE_asm - 1, %ecx
23157+ shr $2, %ecx
23158+ repne scasl
23159+ jecxz 2f
23160+
23161+ cmp $2*16, %ecx
23162+ jc 2f
23163+
23164+ mov $2*16, %ecx
23165+ repe scasl
23166+ jecxz 2f
23167+ jne 1b
23168+
23169+2: cld
23170+ mov %esp, %ecx
23171+ sub %edi, %ecx
23172+
23173+ cmp $THREAD_SIZE_asm, %ecx
23174+ jb 3f
23175+ ud2
23176+3:
23177+
23178+ shr $2, %ecx
23179+ rep stosl
23180+
23181+ mov TI_task_thread_sp0(%ebp), %edi
23182+ sub $128, %edi
23183+ mov %edi, TI_lowest_stack(%ebp)
23184+
23185+ popl %eax
23186+ popl %ecx
23187+ popl %edi
23188+ ret
23189+ENDPROC(pax_erase_kstack)
23190+#endif
23191+
23192+.macro __SAVE_ALL _DS
23193 cld
23194 PUSH_GS
23195 pushl_cfi %fs
23196@@ -206,7 +346,7 @@
23197 CFI_REL_OFFSET ecx, 0
23198 pushl_cfi %ebx
23199 CFI_REL_OFFSET ebx, 0
23200- movl $(__USER_DS), %edx
23201+ movl $\_DS, %edx
23202 movl %edx, %ds
23203 movl %edx, %es
23204 movl $(__KERNEL_PERCPU), %edx
23205@@ -214,6 +354,15 @@
23206 SET_KERNEL_GS %edx
23207 .endm
23208
23209+.macro SAVE_ALL
23210+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23211+ __SAVE_ALL __KERNEL_DS
23212+ pax_enter_kernel
23213+#else
23214+ __SAVE_ALL __USER_DS
23215+#endif
23216+.endm
23217+
23218 .macro RESTORE_INT_REGS
23219 popl_cfi %ebx
23220 CFI_RESTORE ebx
23221@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
23222 popfl_cfi
23223 jmp syscall_exit
23224 CFI_ENDPROC
23225-END(ret_from_fork)
23226+ENDPROC(ret_from_fork)
23227
23228 ENTRY(ret_from_kernel_thread)
23229 CFI_STARTPROC
23230@@ -340,7 +489,15 @@ ret_from_intr:
23231 andl $SEGMENT_RPL_MASK, %eax
23232 #endif
23233 cmpl $USER_RPL, %eax
23234+
23235+#ifdef CONFIG_PAX_KERNEXEC
23236+ jae resume_userspace
23237+
23238+ pax_exit_kernel
23239+ jmp resume_kernel
23240+#else
23241 jb resume_kernel # not returning to v8086 or userspace
23242+#endif
23243
23244 ENTRY(resume_userspace)
23245 LOCKDEP_SYS_EXIT
23246@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
23247 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
23248 # int/exception return?
23249 jne work_pending
23250- jmp restore_all
23251-END(ret_from_exception)
23252+ jmp restore_all_pax
23253+ENDPROC(ret_from_exception)
23254
23255 #ifdef CONFIG_PREEMPT
23256 ENTRY(resume_kernel)
23257@@ -365,7 +522,7 @@ need_resched:
23258 jz restore_all
23259 call preempt_schedule_irq
23260 jmp need_resched
23261-END(resume_kernel)
23262+ENDPROC(resume_kernel)
23263 #endif
23264 CFI_ENDPROC
23265
23266@@ -395,30 +552,45 @@ sysenter_past_esp:
23267 /*CFI_REL_OFFSET cs, 0*/
23268 /*
23269 * Push current_thread_info()->sysenter_return to the stack.
23270- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
23271- * pushed above; +8 corresponds to copy_thread's esp0 setting.
23272 */
23273- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
23274+ pushl_cfi $0
23275 CFI_REL_OFFSET eip, 0
23276
23277 pushl_cfi %eax
23278 SAVE_ALL
23279+ GET_THREAD_INFO(%ebp)
23280+ movl TI_sysenter_return(%ebp),%ebp
23281+ movl %ebp,PT_EIP(%esp)
23282 ENABLE_INTERRUPTS(CLBR_NONE)
23283
23284 /*
23285 * Load the potential sixth argument from user stack.
23286 * Careful about security.
23287 */
23288+ movl PT_OLDESP(%esp),%ebp
23289+
23290+#ifdef CONFIG_PAX_MEMORY_UDEREF
23291+ mov PT_OLDSS(%esp),%ds
23292+1: movl %ds:(%ebp),%ebp
23293+ push %ss
23294+ pop %ds
23295+#else
23296 cmpl $__PAGE_OFFSET-3,%ebp
23297 jae syscall_fault
23298 ASM_STAC
23299 1: movl (%ebp),%ebp
23300 ASM_CLAC
23301+#endif
23302+
23303 movl %ebp,PT_EBP(%esp)
23304 _ASM_EXTABLE(1b,syscall_fault)
23305
23306 GET_THREAD_INFO(%ebp)
23307
23308+#ifdef CONFIG_PAX_RANDKSTACK
23309+ pax_erase_kstack
23310+#endif
23311+
23312 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23313 jnz sysenter_audit
23314 sysenter_do_call:
23315@@ -434,12 +606,24 @@ sysenter_after_call:
23316 testl $_TIF_ALLWORK_MASK, %ecx
23317 jne sysexit_audit
23318 sysenter_exit:
23319+
23320+#ifdef CONFIG_PAX_RANDKSTACK
23321+ pushl_cfi %eax
23322+ movl %esp, %eax
23323+ call pax_randomize_kstack
23324+ popl_cfi %eax
23325+#endif
23326+
23327+ pax_erase_kstack
23328+
23329 /* if something modifies registers it must also disable sysexit */
23330 movl PT_EIP(%esp), %edx
23331 movl PT_OLDESP(%esp), %ecx
23332 xorl %ebp,%ebp
23333 TRACE_IRQS_ON
23334 1: mov PT_FS(%esp), %fs
23335+2: mov PT_DS(%esp), %ds
23336+3: mov PT_ES(%esp), %es
23337 PTGS_TO_GS
23338 ENABLE_INTERRUPTS_SYSEXIT
23339
23340@@ -456,6 +640,9 @@ sysenter_audit:
23341 movl %eax,%edx /* 2nd arg: syscall number */
23342 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
23343 call __audit_syscall_entry
23344+
23345+ pax_erase_kstack
23346+
23347 pushl_cfi %ebx
23348 movl PT_EAX(%esp),%eax /* reload syscall number */
23349 jmp sysenter_do_call
23350@@ -481,10 +668,16 @@ sysexit_audit:
23351
23352 CFI_ENDPROC
23353 .pushsection .fixup,"ax"
23354-2: movl $0,PT_FS(%esp)
23355+4: movl $0,PT_FS(%esp)
23356+ jmp 1b
23357+5: movl $0,PT_DS(%esp)
23358+ jmp 1b
23359+6: movl $0,PT_ES(%esp)
23360 jmp 1b
23361 .popsection
23362- _ASM_EXTABLE(1b,2b)
23363+ _ASM_EXTABLE(1b,4b)
23364+ _ASM_EXTABLE(2b,5b)
23365+ _ASM_EXTABLE(3b,6b)
23366 PTGS_TO_GS_EX
23367 ENDPROC(ia32_sysenter_target)
23368
23369@@ -495,6 +688,11 @@ ENTRY(system_call)
23370 pushl_cfi %eax # save orig_eax
23371 SAVE_ALL
23372 GET_THREAD_INFO(%ebp)
23373+
23374+#ifdef CONFIG_PAX_RANDKSTACK
23375+ pax_erase_kstack
23376+#endif
23377+
23378 # system call tracing in operation / emulation
23379 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23380 jnz syscall_trace_entry
23381@@ -514,6 +712,15 @@ syscall_exit:
23382 testl $_TIF_ALLWORK_MASK, %ecx # current->work
23383 jne syscall_exit_work
23384
23385+restore_all_pax:
23386+
23387+#ifdef CONFIG_PAX_RANDKSTACK
23388+ movl %esp, %eax
23389+ call pax_randomize_kstack
23390+#endif
23391+
23392+ pax_erase_kstack
23393+
23394 restore_all:
23395 TRACE_IRQS_IRET
23396 restore_all_notrace:
23397@@ -568,14 +775,34 @@ ldt_ss:
23398 * compensating for the offset by changing to the ESPFIX segment with
23399 * a base address that matches for the difference.
23400 */
23401-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
23402+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
23403 mov %esp, %edx /* load kernel esp */
23404 mov PT_OLDESP(%esp), %eax /* load userspace esp */
23405 mov %dx, %ax /* eax: new kernel esp */
23406 sub %eax, %edx /* offset (low word is 0) */
23407+#ifdef CONFIG_SMP
23408+ movl PER_CPU_VAR(cpu_number), %ebx
23409+ shll $PAGE_SHIFT_asm, %ebx
23410+ addl $cpu_gdt_table, %ebx
23411+#else
23412+ movl $cpu_gdt_table, %ebx
23413+#endif
23414 shr $16, %edx
23415- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
23416- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
23417+
23418+#ifdef CONFIG_PAX_KERNEXEC
23419+ mov %cr0, %esi
23420+ btr $16, %esi
23421+ mov %esi, %cr0
23422+#endif
23423+
23424+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
23425+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
23426+
23427+#ifdef CONFIG_PAX_KERNEXEC
23428+ bts $16, %esi
23429+ mov %esi, %cr0
23430+#endif
23431+
23432 pushl_cfi $__ESPFIX_SS
23433 pushl_cfi %eax /* new kernel esp */
23434 /* Disable interrupts, but do not irqtrace this section: we
23435@@ -605,20 +832,18 @@ work_resched:
23436 movl TI_flags(%ebp), %ecx
23437 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
23438 # than syscall tracing?
23439- jz restore_all
23440+ jz restore_all_pax
23441 testb $_TIF_NEED_RESCHED, %cl
23442 jnz work_resched
23443
23444 work_notifysig: # deal with pending signals and
23445 # notify-resume requests
23446+ movl %esp, %eax
23447 #ifdef CONFIG_VM86
23448 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
23449- movl %esp, %eax
23450 jne work_notifysig_v86 # returning to kernel-space or
23451 # vm86-space
23452 1:
23453-#else
23454- movl %esp, %eax
23455 #endif
23456 TRACE_IRQS_ON
23457 ENABLE_INTERRUPTS(CLBR_NONE)
23458@@ -639,7 +864,7 @@ work_notifysig_v86:
23459 movl %eax, %esp
23460 jmp 1b
23461 #endif
23462-END(work_pending)
23463+ENDPROC(work_pending)
23464
23465 # perform syscall exit tracing
23466 ALIGN
23467@@ -647,11 +872,14 @@ syscall_trace_entry:
23468 movl $-ENOSYS,PT_EAX(%esp)
23469 movl %esp, %eax
23470 call syscall_trace_enter
23471+
23472+ pax_erase_kstack
23473+
23474 /* What it returned is what we'll actually use. */
23475 cmpl $(NR_syscalls), %eax
23476 jnae syscall_call
23477 jmp syscall_exit
23478-END(syscall_trace_entry)
23479+ENDPROC(syscall_trace_entry)
23480
23481 # perform syscall exit tracing
23482 ALIGN
23483@@ -664,26 +892,30 @@ syscall_exit_work:
23484 movl %esp, %eax
23485 call syscall_trace_leave
23486 jmp resume_userspace
23487-END(syscall_exit_work)
23488+ENDPROC(syscall_exit_work)
23489 CFI_ENDPROC
23490
23491 RING0_INT_FRAME # can't unwind into user space anyway
23492 syscall_fault:
23493+#ifdef CONFIG_PAX_MEMORY_UDEREF
23494+ push %ss
23495+ pop %ds
23496+#endif
23497 ASM_CLAC
23498 GET_THREAD_INFO(%ebp)
23499 movl $-EFAULT,PT_EAX(%esp)
23500 jmp resume_userspace
23501-END(syscall_fault)
23502+ENDPROC(syscall_fault)
23503
23504 syscall_badsys:
23505 movl $-ENOSYS,%eax
23506 jmp syscall_after_call
23507-END(syscall_badsys)
23508+ENDPROC(syscall_badsys)
23509
23510 sysenter_badsys:
23511 movl $-ENOSYS,%eax
23512 jmp sysenter_after_call
23513-END(sysenter_badsys)
23514+ENDPROC(sysenter_badsys)
23515 CFI_ENDPROC
23516
23517 .macro FIXUP_ESPFIX_STACK
23518@@ -696,8 +928,15 @@ END(sysenter_badsys)
23519 */
23520 #ifdef CONFIG_X86_ESPFIX32
23521 /* fixup the stack */
23522- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
23523- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
23524+#ifdef CONFIG_SMP
23525+ movl PER_CPU_VAR(cpu_number), %ebx
23526+ shll $PAGE_SHIFT_asm, %ebx
23527+ addl $cpu_gdt_table, %ebx
23528+#else
23529+ movl $cpu_gdt_table, %ebx
23530+#endif
23531+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23532+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23533 shl $16, %eax
23534 addl %esp, %eax /* the adjusted stack pointer */
23535 pushl_cfi $__KERNEL_DS
23536@@ -753,7 +992,7 @@ vector=vector+1
23537 .endr
23538 2: jmp common_interrupt
23539 .endr
23540-END(irq_entries_start)
23541+ENDPROC(irq_entries_start)
23542
23543 .previous
23544 END(interrupt)
23545@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23546 pushl_cfi $do_coprocessor_error
23547 jmp error_code
23548 CFI_ENDPROC
23549-END(coprocessor_error)
23550+ENDPROC(coprocessor_error)
23551
23552 ENTRY(simd_coprocessor_error)
23553 RING0_INT_FRAME
23554@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23555 .section .altinstructions,"a"
23556 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23557 .previous
23558-.section .altinstr_replacement,"ax"
23559+.section .altinstr_replacement,"a"
23560 663: pushl $do_simd_coprocessor_error
23561 664:
23562 .previous
23563@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23564 #endif
23565 jmp error_code
23566 CFI_ENDPROC
23567-END(simd_coprocessor_error)
23568+ENDPROC(simd_coprocessor_error)
23569
23570 ENTRY(device_not_available)
23571 RING0_INT_FRAME
23572@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23573 pushl_cfi $do_device_not_available
23574 jmp error_code
23575 CFI_ENDPROC
23576-END(device_not_available)
23577+ENDPROC(device_not_available)
23578
23579 #ifdef CONFIG_PARAVIRT
23580 ENTRY(native_iret)
23581 iret
23582 _ASM_EXTABLE(native_iret, iret_exc)
23583-END(native_iret)
23584+ENDPROC(native_iret)
23585
23586 ENTRY(native_irq_enable_sysexit)
23587 sti
23588 sysexit
23589-END(native_irq_enable_sysexit)
23590+ENDPROC(native_irq_enable_sysexit)
23591 #endif
23592
23593 ENTRY(overflow)
23594@@ -862,7 +1101,7 @@ ENTRY(overflow)
23595 pushl_cfi $do_overflow
23596 jmp error_code
23597 CFI_ENDPROC
23598-END(overflow)
23599+ENDPROC(overflow)
23600
23601 ENTRY(bounds)
23602 RING0_INT_FRAME
23603@@ -871,7 +1110,7 @@ ENTRY(bounds)
23604 pushl_cfi $do_bounds
23605 jmp error_code
23606 CFI_ENDPROC
23607-END(bounds)
23608+ENDPROC(bounds)
23609
23610 ENTRY(invalid_op)
23611 RING0_INT_FRAME
23612@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23613 pushl_cfi $do_invalid_op
23614 jmp error_code
23615 CFI_ENDPROC
23616-END(invalid_op)
23617+ENDPROC(invalid_op)
23618
23619 ENTRY(coprocessor_segment_overrun)
23620 RING0_INT_FRAME
23621@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23622 pushl_cfi $do_coprocessor_segment_overrun
23623 jmp error_code
23624 CFI_ENDPROC
23625-END(coprocessor_segment_overrun)
23626+ENDPROC(coprocessor_segment_overrun)
23627
23628 ENTRY(invalid_TSS)
23629 RING0_EC_FRAME
23630@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23631 pushl_cfi $do_invalid_TSS
23632 jmp error_code
23633 CFI_ENDPROC
23634-END(invalid_TSS)
23635+ENDPROC(invalid_TSS)
23636
23637 ENTRY(segment_not_present)
23638 RING0_EC_FRAME
23639@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23640 pushl_cfi $do_segment_not_present
23641 jmp error_code
23642 CFI_ENDPROC
23643-END(segment_not_present)
23644+ENDPROC(segment_not_present)
23645
23646 ENTRY(stack_segment)
23647 RING0_EC_FRAME
23648@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23649 pushl_cfi $do_stack_segment
23650 jmp error_code
23651 CFI_ENDPROC
23652-END(stack_segment)
23653+ENDPROC(stack_segment)
23654
23655 ENTRY(alignment_check)
23656 RING0_EC_FRAME
23657@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23658 pushl_cfi $do_alignment_check
23659 jmp error_code
23660 CFI_ENDPROC
23661-END(alignment_check)
23662+ENDPROC(alignment_check)
23663
23664 ENTRY(divide_error)
23665 RING0_INT_FRAME
23666@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23667 pushl_cfi $do_divide_error
23668 jmp error_code
23669 CFI_ENDPROC
23670-END(divide_error)
23671+ENDPROC(divide_error)
23672
23673 #ifdef CONFIG_X86_MCE
23674 ENTRY(machine_check)
23675@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23676 pushl_cfi machine_check_vector
23677 jmp error_code
23678 CFI_ENDPROC
23679-END(machine_check)
23680+ENDPROC(machine_check)
23681 #endif
23682
23683 ENTRY(spurious_interrupt_bug)
23684@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23685 pushl_cfi $do_spurious_interrupt_bug
23686 jmp error_code
23687 CFI_ENDPROC
23688-END(spurious_interrupt_bug)
23689+ENDPROC(spurious_interrupt_bug)
23690
23691 #ifdef CONFIG_XEN
23692 /* Xen doesn't set %esp to be precisely what the normal sysenter
23693@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23694
23695 ENTRY(mcount)
23696 ret
23697-END(mcount)
23698+ENDPROC(mcount)
23699
23700 ENTRY(ftrace_caller)
23701 pushl %eax
23702@@ -1086,7 +1325,7 @@ ftrace_graph_call:
23703 .globl ftrace_stub
23704 ftrace_stub:
23705 ret
23706-END(ftrace_caller)
23707+ENDPROC(ftrace_caller)
23708
23709 ENTRY(ftrace_regs_caller)
23710 pushf /* push flags before compare (in cs location) */
23711@@ -1184,7 +1423,7 @@ trace:
23712 popl %ecx
23713 popl %eax
23714 jmp ftrace_stub
23715-END(mcount)
23716+ENDPROC(mcount)
23717 #endif /* CONFIG_DYNAMIC_FTRACE */
23718 #endif /* CONFIG_FUNCTION_TRACER */
23719
23720@@ -1202,7 +1441,7 @@ ENTRY(ftrace_graph_caller)
23721 popl %ecx
23722 popl %eax
23723 ret
23724-END(ftrace_graph_caller)
23725+ENDPROC(ftrace_graph_caller)
23726
23727 .globl return_to_handler
23728 return_to_handler:
23729@@ -1263,15 +1502,18 @@ error_code:
23730 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23731 REG_TO_PTGS %ecx
23732 SET_KERNEL_GS %ecx
23733- movl $(__USER_DS), %ecx
23734+ movl $(__KERNEL_DS), %ecx
23735 movl %ecx, %ds
23736 movl %ecx, %es
23737+
23738+ pax_enter_kernel
23739+
23740 TRACE_IRQS_OFF
23741 movl %esp,%eax # pt_regs pointer
23742 call *%edi
23743 jmp ret_from_exception
23744 CFI_ENDPROC
23745-END(page_fault)
23746+ENDPROC(page_fault)
23747
23748 /*
23749 * Debug traps and NMI can happen at the one SYSENTER instruction
23750@@ -1314,7 +1556,7 @@ debug_stack_correct:
23751 call do_debug
23752 jmp ret_from_exception
23753 CFI_ENDPROC
23754-END(debug)
23755+ENDPROC(debug)
23756
23757 /*
23758 * NMI is doubly nasty. It can happen _while_ we're handling
23759@@ -1354,6 +1596,9 @@ nmi_stack_correct:
23760 xorl %edx,%edx # zero error code
23761 movl %esp,%eax # pt_regs pointer
23762 call do_nmi
23763+
23764+ pax_exit_kernel
23765+
23766 jmp restore_all_notrace
23767 CFI_ENDPROC
23768
23769@@ -1391,13 +1636,16 @@ nmi_espfix_stack:
23770 FIXUP_ESPFIX_STACK # %eax == %esp
23771 xorl %edx,%edx # zero error code
23772 call do_nmi
23773+
23774+ pax_exit_kernel
23775+
23776 RESTORE_REGS
23777 lss 12+4(%esp), %esp # back to espfix stack
23778 CFI_ADJUST_CFA_OFFSET -24
23779 jmp irq_return
23780 #endif
23781 CFI_ENDPROC
23782-END(nmi)
23783+ENDPROC(nmi)
23784
23785 ENTRY(int3)
23786 RING0_INT_FRAME
23787@@ -1410,14 +1658,14 @@ ENTRY(int3)
23788 call do_int3
23789 jmp ret_from_exception
23790 CFI_ENDPROC
23791-END(int3)
23792+ENDPROC(int3)
23793
23794 ENTRY(general_protection)
23795 RING0_EC_FRAME
23796 pushl_cfi $do_general_protection
23797 jmp error_code
23798 CFI_ENDPROC
23799-END(general_protection)
23800+ENDPROC(general_protection)
23801
23802 #ifdef CONFIG_KVM_GUEST
23803 ENTRY(async_page_fault)
23804@@ -1426,6 +1674,6 @@ ENTRY(async_page_fault)
23805 pushl_cfi $do_async_page_fault
23806 jmp error_code
23807 CFI_ENDPROC
23808-END(async_page_fault)
23809+ENDPROC(async_page_fault)
23810 #endif
23811
23812diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23813index 2fac134..b020fca 100644
23814--- a/arch/x86/kernel/entry_64.S
23815+++ b/arch/x86/kernel/entry_64.S
23816@@ -59,6 +59,8 @@
23817 #include <asm/smap.h>
23818 #include <asm/pgtable_types.h>
23819 #include <linux/err.h>
23820+#include <asm/pgtable.h>
23821+#include <asm/alternative-asm.h>
23822
23823 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23824 #include <linux/elf-em.h>
23825@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23826 ENDPROC(native_usergs_sysret64)
23827 #endif /* CONFIG_PARAVIRT */
23828
23829+ .macro ljmpq sel, off
23830+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23831+ .byte 0x48; ljmp *1234f(%rip)
23832+ .pushsection .rodata
23833+ .align 16
23834+ 1234: .quad \off; .word \sel
23835+ .popsection
23836+#else
23837+ pushq $\sel
23838+ pushq $\off
23839+ lretq
23840+#endif
23841+ .endm
23842+
23843+ .macro pax_enter_kernel
23844+ pax_set_fptr_mask
23845+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23846+ call pax_enter_kernel
23847+#endif
23848+ .endm
23849+
23850+ .macro pax_exit_kernel
23851+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23852+ call pax_exit_kernel
23853+#endif
23854+
23855+ .endm
23856+
23857+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23858+ENTRY(pax_enter_kernel)
23859+ pushq %rdi
23860+
23861+#ifdef CONFIG_PARAVIRT
23862+ PV_SAVE_REGS(CLBR_RDI)
23863+#endif
23864+
23865+#ifdef CONFIG_PAX_KERNEXEC
23866+ GET_CR0_INTO_RDI
23867+ bts $16,%rdi
23868+ jnc 3f
23869+ mov %cs,%edi
23870+ cmp $__KERNEL_CS,%edi
23871+ jnz 2f
23872+1:
23873+#endif
23874+
23875+#ifdef CONFIG_PAX_MEMORY_UDEREF
23876+ 661: jmp 111f
23877+ .pushsection .altinstr_replacement, "a"
23878+ 662: ASM_NOP2
23879+ .popsection
23880+ .pushsection .altinstructions, "a"
23881+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23882+ .popsection
23883+ GET_CR3_INTO_RDI
23884+ cmp $0,%dil
23885+ jnz 112f
23886+ mov $__KERNEL_DS,%edi
23887+ mov %edi,%ss
23888+ jmp 111f
23889+112: cmp $1,%dil
23890+ jz 113f
23891+ ud2
23892+113: sub $4097,%rdi
23893+ bts $63,%rdi
23894+ SET_RDI_INTO_CR3
23895+ mov $__UDEREF_KERNEL_DS,%edi
23896+ mov %edi,%ss
23897+111:
23898+#endif
23899+
23900+#ifdef CONFIG_PARAVIRT
23901+ PV_RESTORE_REGS(CLBR_RDI)
23902+#endif
23903+
23904+ popq %rdi
23905+ pax_force_retaddr
23906+ retq
23907+
23908+#ifdef CONFIG_PAX_KERNEXEC
23909+2: ljmpq __KERNEL_CS,1b
23910+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23911+4: SET_RDI_INTO_CR0
23912+ jmp 1b
23913+#endif
23914+ENDPROC(pax_enter_kernel)
23915+
23916+ENTRY(pax_exit_kernel)
23917+ pushq %rdi
23918+
23919+#ifdef CONFIG_PARAVIRT
23920+ PV_SAVE_REGS(CLBR_RDI)
23921+#endif
23922+
23923+#ifdef CONFIG_PAX_KERNEXEC
23924+ mov %cs,%rdi
23925+ cmp $__KERNEXEC_KERNEL_CS,%edi
23926+ jz 2f
23927+ GET_CR0_INTO_RDI
23928+ bts $16,%rdi
23929+ jnc 4f
23930+1:
23931+#endif
23932+
23933+#ifdef CONFIG_PAX_MEMORY_UDEREF
23934+ 661: jmp 111f
23935+ .pushsection .altinstr_replacement, "a"
23936+ 662: ASM_NOP2
23937+ .popsection
23938+ .pushsection .altinstructions, "a"
23939+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23940+ .popsection
23941+ mov %ss,%edi
23942+ cmp $__UDEREF_KERNEL_DS,%edi
23943+ jnz 111f
23944+ GET_CR3_INTO_RDI
23945+ cmp $0,%dil
23946+ jz 112f
23947+ ud2
23948+112: add $4097,%rdi
23949+ bts $63,%rdi
23950+ SET_RDI_INTO_CR3
23951+ mov $__KERNEL_DS,%edi
23952+ mov %edi,%ss
23953+111:
23954+#endif
23955+
23956+#ifdef CONFIG_PARAVIRT
23957+ PV_RESTORE_REGS(CLBR_RDI);
23958+#endif
23959+
23960+ popq %rdi
23961+ pax_force_retaddr
23962+ retq
23963+
23964+#ifdef CONFIG_PAX_KERNEXEC
23965+2: GET_CR0_INTO_RDI
23966+ btr $16,%rdi
23967+ jnc 4f
23968+ ljmpq __KERNEL_CS,3f
23969+3: SET_RDI_INTO_CR0
23970+ jmp 1b
23971+4: ud2
23972+ jmp 4b
23973+#endif
23974+ENDPROC(pax_exit_kernel)
23975+#endif
23976+
23977+ .macro pax_enter_kernel_user
23978+ pax_set_fptr_mask
23979+#ifdef CONFIG_PAX_MEMORY_UDEREF
23980+ call pax_enter_kernel_user
23981+#endif
23982+ .endm
23983+
23984+ .macro pax_exit_kernel_user
23985+#ifdef CONFIG_PAX_MEMORY_UDEREF
23986+ call pax_exit_kernel_user
23987+#endif
23988+#ifdef CONFIG_PAX_RANDKSTACK
23989+ pushq %rax
23990+ pushq %r11
23991+ call pax_randomize_kstack
23992+ popq %r11
23993+ popq %rax
23994+#endif
23995+ .endm
23996+
23997+#ifdef CONFIG_PAX_MEMORY_UDEREF
23998+ENTRY(pax_enter_kernel_user)
23999+ pushq %rdi
24000+ pushq %rbx
24001+
24002+#ifdef CONFIG_PARAVIRT
24003+ PV_SAVE_REGS(CLBR_RDI)
24004+#endif
24005+
24006+ 661: jmp 111f
24007+ .pushsection .altinstr_replacement, "a"
24008+ 662: ASM_NOP2
24009+ .popsection
24010+ .pushsection .altinstructions, "a"
24011+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24012+ .popsection
24013+ GET_CR3_INTO_RDI
24014+ cmp $1,%dil
24015+ jnz 4f
24016+ sub $4097,%rdi
24017+ bts $63,%rdi
24018+ SET_RDI_INTO_CR3
24019+ jmp 3f
24020+111:
24021+
24022+ GET_CR3_INTO_RDI
24023+ mov %rdi,%rbx
24024+ add $__START_KERNEL_map,%rbx
24025+ sub phys_base(%rip),%rbx
24026+
24027+#ifdef CONFIG_PARAVIRT
24028+ cmpl $0, pv_info+PARAVIRT_enabled
24029+ jz 1f
24030+ pushq %rdi
24031+ i = 0
24032+ .rept USER_PGD_PTRS
24033+ mov i*8(%rbx),%rsi
24034+ mov $0,%sil
24035+ lea i*8(%rbx),%rdi
24036+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24037+ i = i + 1
24038+ .endr
24039+ popq %rdi
24040+ jmp 2f
24041+1:
24042+#endif
24043+
24044+ i = 0
24045+ .rept USER_PGD_PTRS
24046+ movb $0,i*8(%rbx)
24047+ i = i + 1
24048+ .endr
24049+
24050+2: SET_RDI_INTO_CR3
24051+
24052+#ifdef CONFIG_PAX_KERNEXEC
24053+ GET_CR0_INTO_RDI
24054+ bts $16,%rdi
24055+ SET_RDI_INTO_CR0
24056+#endif
24057+
24058+3:
24059+
24060+#ifdef CONFIG_PARAVIRT
24061+ PV_RESTORE_REGS(CLBR_RDI)
24062+#endif
24063+
24064+ popq %rbx
24065+ popq %rdi
24066+ pax_force_retaddr
24067+ retq
24068+4: ud2
24069+ENDPROC(pax_enter_kernel_user)
24070+
24071+ENTRY(pax_exit_kernel_user)
24072+ pushq %rdi
24073+ pushq %rbx
24074+
24075+#ifdef CONFIG_PARAVIRT
24076+ PV_SAVE_REGS(CLBR_RDI)
24077+#endif
24078+
24079+ GET_CR3_INTO_RDI
24080+ 661: jmp 1f
24081+ .pushsection .altinstr_replacement, "a"
24082+ 662: ASM_NOP2
24083+ .popsection
24084+ .pushsection .altinstructions, "a"
24085+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24086+ .popsection
24087+ cmp $0,%dil
24088+ jnz 3f
24089+ add $4097,%rdi
24090+ bts $63,%rdi
24091+ SET_RDI_INTO_CR3
24092+ jmp 2f
24093+1:
24094+
24095+ mov %rdi,%rbx
24096+
24097+#ifdef CONFIG_PAX_KERNEXEC
24098+ GET_CR0_INTO_RDI
24099+ btr $16,%rdi
24100+ jnc 3f
24101+ SET_RDI_INTO_CR0
24102+#endif
24103+
24104+ add $__START_KERNEL_map,%rbx
24105+ sub phys_base(%rip),%rbx
24106+
24107+#ifdef CONFIG_PARAVIRT
24108+ cmpl $0, pv_info+PARAVIRT_enabled
24109+ jz 1f
24110+ i = 0
24111+ .rept USER_PGD_PTRS
24112+ mov i*8(%rbx),%rsi
24113+ mov $0x67,%sil
24114+ lea i*8(%rbx),%rdi
24115+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24116+ i = i + 1
24117+ .endr
24118+ jmp 2f
24119+1:
24120+#endif
24121+
24122+ i = 0
24123+ .rept USER_PGD_PTRS
24124+ movb $0x67,i*8(%rbx)
24125+ i = i + 1
24126+ .endr
24127+2:
24128+
24129+#ifdef CONFIG_PARAVIRT
24130+ PV_RESTORE_REGS(CLBR_RDI)
24131+#endif
24132+
24133+ popq %rbx
24134+ popq %rdi
24135+ pax_force_retaddr
24136+ retq
24137+3: ud2
24138+ENDPROC(pax_exit_kernel_user)
24139+#endif
24140+
24141+ .macro pax_enter_kernel_nmi
24142+ pax_set_fptr_mask
24143+
24144+#ifdef CONFIG_PAX_KERNEXEC
24145+ GET_CR0_INTO_RDI
24146+ bts $16,%rdi
24147+ jc 110f
24148+ SET_RDI_INTO_CR0
24149+ or $2,%ebx
24150+110:
24151+#endif
24152+
24153+#ifdef CONFIG_PAX_MEMORY_UDEREF
24154+ 661: jmp 111f
24155+ .pushsection .altinstr_replacement, "a"
24156+ 662: ASM_NOP2
24157+ .popsection
24158+ .pushsection .altinstructions, "a"
24159+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24160+ .popsection
24161+ GET_CR3_INTO_RDI
24162+ cmp $0,%dil
24163+ jz 111f
24164+ sub $4097,%rdi
24165+ or $4,%ebx
24166+ bts $63,%rdi
24167+ SET_RDI_INTO_CR3
24168+ mov $__UDEREF_KERNEL_DS,%edi
24169+ mov %edi,%ss
24170+111:
24171+#endif
24172+ .endm
24173+
24174+ .macro pax_exit_kernel_nmi
24175+#ifdef CONFIG_PAX_KERNEXEC
24176+ btr $1,%ebx
24177+ jnc 110f
24178+ GET_CR0_INTO_RDI
24179+ btr $16,%rdi
24180+ SET_RDI_INTO_CR0
24181+110:
24182+#endif
24183+
24184+#ifdef CONFIG_PAX_MEMORY_UDEREF
24185+ btr $2,%ebx
24186+ jnc 111f
24187+ GET_CR3_INTO_RDI
24188+ add $4097,%rdi
24189+ bts $63,%rdi
24190+ SET_RDI_INTO_CR3
24191+ mov $__KERNEL_DS,%edi
24192+ mov %edi,%ss
24193+111:
24194+#endif
24195+ .endm
24196+
24197+ .macro pax_erase_kstack
24198+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24199+ call pax_erase_kstack
24200+#endif
24201+ .endm
24202+
24203+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24204+ENTRY(pax_erase_kstack)
24205+ pushq %rdi
24206+ pushq %rcx
24207+ pushq %rax
24208+ pushq %r11
24209+
24210+ GET_THREAD_INFO(%r11)
24211+ mov TI_lowest_stack(%r11), %rdi
24212+ mov $-0xBEEF, %rax
24213+ std
24214+
24215+1: mov %edi, %ecx
24216+ and $THREAD_SIZE_asm - 1, %ecx
24217+ shr $3, %ecx
24218+ repne scasq
24219+ jecxz 2f
24220+
24221+ cmp $2*8, %ecx
24222+ jc 2f
24223+
24224+ mov $2*8, %ecx
24225+ repe scasq
24226+ jecxz 2f
24227+ jne 1b
24228+
24229+2: cld
24230+ mov %esp, %ecx
24231+ sub %edi, %ecx
24232+
24233+ cmp $THREAD_SIZE_asm, %rcx
24234+ jb 3f
24235+ ud2
24236+3:
24237+
24238+ shr $3, %ecx
24239+ rep stosq
24240+
24241+ mov TI_task_thread_sp0(%r11), %rdi
24242+ sub $256, %rdi
24243+ mov %rdi, TI_lowest_stack(%r11)
24244+
24245+ popq %r11
24246+ popq %rax
24247+ popq %rcx
24248+ popq %rdi
24249+ pax_force_retaddr
24250+ ret
24251+ENDPROC(pax_erase_kstack)
24252+#endif
24253
24254 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
24255 #ifdef CONFIG_TRACE_IRQFLAGS
24256@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
24257 .endm
24258
24259 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
24260- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
24261+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
24262 jnc 1f
24263 TRACE_IRQS_ON_DEBUG
24264 1:
24265@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
24266 movq \tmp,R11+\offset(%rsp)
24267 .endm
24268
24269- .macro FAKE_STACK_FRAME child_rip
24270- /* push in order ss, rsp, eflags, cs, rip */
24271- xorl %eax, %eax
24272- pushq_cfi $__KERNEL_DS /* ss */
24273- /*CFI_REL_OFFSET ss,0*/
24274- pushq_cfi %rax /* rsp */
24275- CFI_REL_OFFSET rsp,0
24276- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
24277- /*CFI_REL_OFFSET rflags,0*/
24278- pushq_cfi $__KERNEL_CS /* cs */
24279- /*CFI_REL_OFFSET cs,0*/
24280- pushq_cfi \child_rip /* rip */
24281- CFI_REL_OFFSET rip,0
24282- pushq_cfi %rax /* orig rax */
24283- .endm
24284-
24285- .macro UNFAKE_STACK_FRAME
24286- addq $8*6, %rsp
24287- CFI_ADJUST_CFA_OFFSET -(6*8)
24288- .endm
24289-
24290 /*
24291 * initial frame state for interrupts (and exceptions without error code)
24292 */
24293@@ -241,25 +646,26 @@ ENDPROC(native_usergs_sysret64)
24294 /* save partial stack frame */
24295 .macro SAVE_ARGS_IRQ
24296 cld
24297- /* start from rbp in pt_regs and jump over */
24298- movq_cfi rdi, (RDI-RBP)
24299- movq_cfi rsi, (RSI-RBP)
24300- movq_cfi rdx, (RDX-RBP)
24301- movq_cfi rcx, (RCX-RBP)
24302- movq_cfi rax, (RAX-RBP)
24303- movq_cfi r8, (R8-RBP)
24304- movq_cfi r9, (R9-RBP)
24305- movq_cfi r10, (R10-RBP)
24306- movq_cfi r11, (R11-RBP)
24307+ /* start from r15 in pt_regs and jump over */
24308+ movq_cfi rdi, RDI
24309+ movq_cfi rsi, RSI
24310+ movq_cfi rdx, RDX
24311+ movq_cfi rcx, RCX
24312+ movq_cfi rax, RAX
24313+ movq_cfi r8, R8
24314+ movq_cfi r9, R9
24315+ movq_cfi r10, R10
24316+ movq_cfi r11, R11
24317+ movq_cfi r12, R12
24318
24319 /* Save rbp so that we can unwind from get_irq_regs() */
24320- movq_cfi rbp, 0
24321+ movq_cfi rbp, RBP
24322
24323 /* Save previous stack value */
24324 movq %rsp, %rsi
24325
24326- leaq -RBP(%rsp),%rdi /* arg1 for handler */
24327- testl $3, CS-RBP(%rsi)
24328+ movq %rsp,%rdi /* arg1 for handler */
24329+ testb $3, CS(%rsi)
24330 je 1f
24331 SWAPGS
24332 /*
24333@@ -279,6 +685,18 @@ ENDPROC(native_usergs_sysret64)
24334 0x06 /* DW_OP_deref */, \
24335 0x08 /* DW_OP_const1u */, SS+8-RBP, \
24336 0x22 /* DW_OP_plus */
24337+
24338+#ifdef CONFIG_PAX_MEMORY_UDEREF
24339+ testb $3, CS(%rdi)
24340+ jnz 1f
24341+ pax_enter_kernel
24342+ jmp 2f
24343+1: pax_enter_kernel_user
24344+2:
24345+#else
24346+ pax_enter_kernel
24347+#endif
24348+
24349 /* We entered an interrupt context - irqs are off: */
24350 TRACE_IRQS_OFF
24351 .endm
24352@@ -308,9 +726,52 @@ ENTRY(save_paranoid)
24353 js 1f /* negative -> in kernel */
24354 SWAPGS
24355 xorl %ebx,%ebx
24356-1: ret
24357+1:
24358+#ifdef CONFIG_PAX_MEMORY_UDEREF
24359+ testb $3, CS+8(%rsp)
24360+ jnz 1f
24361+ pax_enter_kernel
24362+ jmp 2f
24363+1: pax_enter_kernel_user
24364+2:
24365+#else
24366+ pax_enter_kernel
24367+#endif
24368+ pax_force_retaddr
24369+ ret
24370 CFI_ENDPROC
24371-END(save_paranoid)
24372+ENDPROC(save_paranoid)
24373+
24374+ENTRY(save_paranoid_nmi)
24375+ XCPT_FRAME 1 RDI+8
24376+ cld
24377+ movq_cfi rdi, RDI+8
24378+ movq_cfi rsi, RSI+8
24379+ movq_cfi rdx, RDX+8
24380+ movq_cfi rcx, RCX+8
24381+ movq_cfi rax, RAX+8
24382+ movq_cfi r8, R8+8
24383+ movq_cfi r9, R9+8
24384+ movq_cfi r10, R10+8
24385+ movq_cfi r11, R11+8
24386+ movq_cfi rbx, RBX+8
24387+ movq_cfi rbp, RBP+8
24388+ movq_cfi r12, R12+8
24389+ movq_cfi r13, R13+8
24390+ movq_cfi r14, R14+8
24391+ movq_cfi r15, R15+8
24392+ movl $1,%ebx
24393+ movl $MSR_GS_BASE,%ecx
24394+ rdmsr
24395+ testl %edx,%edx
24396+ js 1f /* negative -> in kernel */
24397+ SWAPGS
24398+ xorl %ebx,%ebx
24399+1: pax_enter_kernel_nmi
24400+ pax_force_retaddr
24401+ ret
24402+ CFI_ENDPROC
24403+ENDPROC(save_paranoid_nmi)
24404
24405 /*
24406 * A newly forked process directly context switches into this address.
24407@@ -331,7 +792,7 @@ ENTRY(ret_from_fork)
24408
24409 RESTORE_REST
24410
24411- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24412+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24413 jz 1f
24414
24415 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
24416@@ -341,15 +802,13 @@ ENTRY(ret_from_fork)
24417 jmp ret_from_sys_call # go to the SYSRET fastpath
24418
24419 1:
24420- subq $REST_SKIP, %rsp # leave space for volatiles
24421- CFI_ADJUST_CFA_OFFSET REST_SKIP
24422 movq %rbp, %rdi
24423 call *%rbx
24424 movl $0, RAX(%rsp)
24425 RESTORE_REST
24426 jmp int_ret_from_sys_call
24427 CFI_ENDPROC
24428-END(ret_from_fork)
24429+ENDPROC(ret_from_fork)
24430
24431 /*
24432 * System call entry. Up to 6 arguments in registers are supported.
24433@@ -386,7 +845,7 @@ END(ret_from_fork)
24434 ENTRY(system_call)
24435 CFI_STARTPROC simple
24436 CFI_SIGNAL_FRAME
24437- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
24438+ CFI_DEF_CFA rsp,0
24439 CFI_REGISTER rip,rcx
24440 /*CFI_REGISTER rflags,r11*/
24441 SWAPGS_UNSAFE_STACK
24442@@ -399,16 +858,23 @@ GLOBAL(system_call_after_swapgs)
24443
24444 movq %rsp,PER_CPU_VAR(old_rsp)
24445 movq PER_CPU_VAR(kernel_stack),%rsp
24446+ SAVE_ARGS 8*6,0
24447+ pax_enter_kernel_user
24448+
24449+#ifdef CONFIG_PAX_RANDKSTACK
24450+ pax_erase_kstack
24451+#endif
24452+
24453 /*
24454 * No need to follow this irqs off/on section - it's straight
24455 * and short:
24456 */
24457 ENABLE_INTERRUPTS(CLBR_NONE)
24458- SAVE_ARGS 8,0
24459 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
24460 movq %rcx,RIP-ARGOFFSET(%rsp)
24461 CFI_REL_OFFSET rip,RIP-ARGOFFSET
24462- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24463+ GET_THREAD_INFO(%rcx)
24464+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
24465 jnz tracesys
24466 system_call_fastpath:
24467 #if __SYSCALL_MASK == ~0
24468@@ -432,10 +898,13 @@ sysret_check:
24469 LOCKDEP_SYS_EXIT
24470 DISABLE_INTERRUPTS(CLBR_NONE)
24471 TRACE_IRQS_OFF
24472- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
24473+ GET_THREAD_INFO(%rcx)
24474+ movl TI_flags(%rcx),%edx
24475 andl %edi,%edx
24476 jnz sysret_careful
24477 CFI_REMEMBER_STATE
24478+ pax_exit_kernel_user
24479+ pax_erase_kstack
24480 /*
24481 * sysretq will re-enable interrupts:
24482 */
24483@@ -494,6 +963,9 @@ auditsys:
24484 movq %rax,%rsi /* 2nd arg: syscall number */
24485 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
24486 call __audit_syscall_entry
24487+
24488+ pax_erase_kstack
24489+
24490 LOAD_ARGS 0 /* reload call-clobbered registers */
24491 jmp system_call_fastpath
24492
24493@@ -515,7 +987,7 @@ sysret_audit:
24494 /* Do syscall tracing */
24495 tracesys:
24496 #ifdef CONFIG_AUDITSYSCALL
24497- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24498+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
24499 jz auditsys
24500 #endif
24501 SAVE_REST
24502@@ -523,12 +995,15 @@ tracesys:
24503 FIXUP_TOP_OF_STACK %rdi
24504 movq %rsp,%rdi
24505 call syscall_trace_enter
24506+
24507+ pax_erase_kstack
24508+
24509 /*
24510 * Reload arg registers from stack in case ptrace changed them.
24511 * We don't reload %rax because syscall_trace_enter() returned
24512 * the value it wants us to use in the table lookup.
24513 */
24514- LOAD_ARGS ARGOFFSET, 1
24515+ LOAD_ARGS 1
24516 RESTORE_REST
24517 #if __SYSCALL_MASK == ~0
24518 cmpq $__NR_syscall_max,%rax
24519@@ -558,7 +1033,9 @@ GLOBAL(int_with_check)
24520 andl %edi,%edx
24521 jnz int_careful
24522 andl $~TS_COMPAT,TI_status(%rcx)
24523- jmp retint_swapgs
24524+ pax_exit_kernel_user
24525+ pax_erase_kstack
24526+ jmp retint_swapgs_pax
24527
24528 /* Either reschedule or signal or syscall exit tracking needed. */
24529 /* First do a reschedule test. */
24530@@ -604,7 +1081,7 @@ int_restore_rest:
24531 TRACE_IRQS_OFF
24532 jmp int_with_check
24533 CFI_ENDPROC
24534-END(system_call)
24535+ENDPROC(system_call)
24536
24537 .macro FORK_LIKE func
24538 ENTRY(stub_\func)
24539@@ -617,9 +1094,10 @@ ENTRY(stub_\func)
24540 DEFAULT_FRAME 0 8 /* offset 8: return address */
24541 call sys_\func
24542 RESTORE_TOP_OF_STACK %r11, 8
24543- ret $REST_SKIP /* pop extended registers */
24544+ pax_force_retaddr
24545+ ret
24546 CFI_ENDPROC
24547-END(stub_\func)
24548+ENDPROC(stub_\func)
24549 .endm
24550
24551 .macro FIXED_FRAME label,func
24552@@ -629,9 +1107,10 @@ ENTRY(\label)
24553 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24554 call \func
24555 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24556+ pax_force_retaddr
24557 ret
24558 CFI_ENDPROC
24559-END(\label)
24560+ENDPROC(\label)
24561 .endm
24562
24563 FORK_LIKE clone
24564@@ -639,19 +1118,6 @@ END(\label)
24565 FORK_LIKE vfork
24566 FIXED_FRAME stub_iopl, sys_iopl
24567
24568-ENTRY(ptregscall_common)
24569- DEFAULT_FRAME 1 8 /* offset 8: return address */
24570- RESTORE_TOP_OF_STACK %r11, 8
24571- movq_cfi_restore R15+8, r15
24572- movq_cfi_restore R14+8, r14
24573- movq_cfi_restore R13+8, r13
24574- movq_cfi_restore R12+8, r12
24575- movq_cfi_restore RBP+8, rbp
24576- movq_cfi_restore RBX+8, rbx
24577- ret $REST_SKIP /* pop extended registers */
24578- CFI_ENDPROC
24579-END(ptregscall_common)
24580-
24581 ENTRY(stub_execve)
24582 CFI_STARTPROC
24583 addq $8, %rsp
24584@@ -663,7 +1129,7 @@ ENTRY(stub_execve)
24585 RESTORE_REST
24586 jmp int_ret_from_sys_call
24587 CFI_ENDPROC
24588-END(stub_execve)
24589+ENDPROC(stub_execve)
24590
24591 /*
24592 * sigreturn is special because it needs to restore all registers on return.
24593@@ -680,7 +1146,7 @@ ENTRY(stub_rt_sigreturn)
24594 RESTORE_REST
24595 jmp int_ret_from_sys_call
24596 CFI_ENDPROC
24597-END(stub_rt_sigreturn)
24598+ENDPROC(stub_rt_sigreturn)
24599
24600 #ifdef CONFIG_X86_X32_ABI
24601 ENTRY(stub_x32_rt_sigreturn)
24602@@ -694,7 +1160,7 @@ ENTRY(stub_x32_rt_sigreturn)
24603 RESTORE_REST
24604 jmp int_ret_from_sys_call
24605 CFI_ENDPROC
24606-END(stub_x32_rt_sigreturn)
24607+ENDPROC(stub_x32_rt_sigreturn)
24608
24609 ENTRY(stub_x32_execve)
24610 CFI_STARTPROC
24611@@ -708,7 +1174,7 @@ ENTRY(stub_x32_execve)
24612 RESTORE_REST
24613 jmp int_ret_from_sys_call
24614 CFI_ENDPROC
24615-END(stub_x32_execve)
24616+ENDPROC(stub_x32_execve)
24617
24618 #endif
24619
24620@@ -745,7 +1211,7 @@ vector=vector+1
24621 2: jmp common_interrupt
24622 .endr
24623 CFI_ENDPROC
24624-END(irq_entries_start)
24625+ENDPROC(irq_entries_start)
24626
24627 .previous
24628 END(interrupt)
24629@@ -762,8 +1228,8 @@ END(interrupt)
24630 /* 0(%rsp): ~(interrupt number) */
24631 .macro interrupt func
24632 /* reserve pt_regs for scratch regs and rbp */
24633- subq $ORIG_RAX-RBP, %rsp
24634- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24635+ subq $ORIG_RAX, %rsp
24636+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24637 SAVE_ARGS_IRQ
24638 call \func
24639 .endm
24640@@ -786,14 +1252,14 @@ ret_from_intr:
24641
24642 /* Restore saved previous stack */
24643 popq %rsi
24644- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24645- leaq ARGOFFSET-RBP(%rsi), %rsp
24646+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24647+ movq %rsi, %rsp
24648 CFI_DEF_CFA_REGISTER rsp
24649- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24650+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24651
24652 exit_intr:
24653 GET_THREAD_INFO(%rcx)
24654- testl $3,CS-ARGOFFSET(%rsp)
24655+ testb $3,CS-ARGOFFSET(%rsp)
24656 je retint_kernel
24657
24658 /* Interrupt came from user space */
24659@@ -815,12 +1281,35 @@ retint_swapgs: /* return to user-space */
24660 * The iretq could re-enable interrupts:
24661 */
24662 DISABLE_INTERRUPTS(CLBR_ANY)
24663+ pax_exit_kernel_user
24664+retint_swapgs_pax:
24665 TRACE_IRQS_IRETQ
24666 SWAPGS
24667 jmp restore_args
24668
24669 retint_restore_args: /* return to kernel space */
24670 DISABLE_INTERRUPTS(CLBR_ANY)
24671+ pax_exit_kernel
24672+
24673+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24674+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24675+ * namely calling EFI runtime services with a phys mapping. We're
24676+ * starting off with NOPs and patch in the real instrumentation
24677+ * (BTS/OR) before starting any userland process; even before starting
24678+ * up the APs.
24679+ */
24680+ .pushsection .altinstr_replacement, "a"
24681+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24682+ 602:
24683+ .popsection
24684+ 603: .fill 602b-601b, 1, 0x90
24685+ .pushsection .altinstructions, "a"
24686+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24687+ .popsection
24688+#else
24689+ pax_force_retaddr (RIP-ARGOFFSET)
24690+#endif
24691+
24692 /*
24693 * The iretq could re-enable interrupts:
24694 */
24695@@ -933,7 +1422,7 @@ ENTRY(retint_kernel)
24696 jmp exit_intr
24697 #endif
24698 CFI_ENDPROC
24699-END(common_interrupt)
24700+ENDPROC(common_interrupt)
24701
24702 /*
24703 * If IRET takes a fault on the espfix stack, then we
24704@@ -955,13 +1444,13 @@ __do_double_fault:
24705 cmpq $native_irq_return_iret,%rax
24706 jne do_double_fault /* This shouldn't happen... */
24707 movq PER_CPU_VAR(kernel_stack),%rax
24708- subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
24709+ subq $(6*8),%rax /* Reset to original stack */
24710 movq %rax,RSP(%rdi)
24711 movq $0,(%rax) /* Missing (lost) #GP error code */
24712 movq $general_protection,RIP(%rdi)
24713 retq
24714 CFI_ENDPROC
24715-END(__do_double_fault)
24716+ENDPROC(__do_double_fault)
24717 #else
24718 # define __do_double_fault do_double_fault
24719 #endif
24720@@ -978,7 +1467,7 @@ ENTRY(\sym)
24721 interrupt \do_sym
24722 jmp ret_from_intr
24723 CFI_ENDPROC
24724-END(\sym)
24725+ENDPROC(\sym)
24726 .endm
24727
24728 #ifdef CONFIG_TRACING
24729@@ -1051,7 +1540,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24730 /*
24731 * Exception entry points.
24732 */
24733-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24734+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24735
24736 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24737 ENTRY(\sym)
24738@@ -1102,6 +1591,12 @@ ENTRY(\sym)
24739 .endif
24740
24741 .if \shift_ist != -1
24742+#ifdef CONFIG_SMP
24743+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24744+ lea init_tss(%r13), %r13
24745+#else
24746+ lea init_tss(%rip), %r13
24747+#endif
24748 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24749 .endif
24750
24751@@ -1118,7 +1613,7 @@ ENTRY(\sym)
24752 .endif
24753
24754 CFI_ENDPROC
24755-END(\sym)
24756+ENDPROC(\sym)
24757 .endm
24758
24759 #ifdef CONFIG_TRACING
24760@@ -1159,9 +1654,10 @@ gs_change:
24761 2: mfence /* workaround */
24762 SWAPGS
24763 popfq_cfi
24764+ pax_force_retaddr
24765 ret
24766 CFI_ENDPROC
24767-END(native_load_gs_index)
24768+ENDPROC(native_load_gs_index)
24769
24770 _ASM_EXTABLE(gs_change,bad_gs)
24771 .section .fixup,"ax"
24772@@ -1189,9 +1685,10 @@ ENTRY(do_softirq_own_stack)
24773 CFI_DEF_CFA_REGISTER rsp
24774 CFI_ADJUST_CFA_OFFSET -8
24775 decl PER_CPU_VAR(irq_count)
24776+ pax_force_retaddr
24777 ret
24778 CFI_ENDPROC
24779-END(do_softirq_own_stack)
24780+ENDPROC(do_softirq_own_stack)
24781
24782 #ifdef CONFIG_XEN
24783 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24784@@ -1229,7 +1726,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24785 decl PER_CPU_VAR(irq_count)
24786 jmp error_exit
24787 CFI_ENDPROC
24788-END(xen_do_hypervisor_callback)
24789+ENDPROC(xen_do_hypervisor_callback)
24790
24791 /*
24792 * Hypervisor uses this for application faults while it executes.
24793@@ -1288,7 +1785,7 @@ ENTRY(xen_failsafe_callback)
24794 SAVE_ALL
24795 jmp error_exit
24796 CFI_ENDPROC
24797-END(xen_failsafe_callback)
24798+ENDPROC(xen_failsafe_callback)
24799
24800 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24801 xen_hvm_callback_vector xen_evtchn_do_upcall
24802@@ -1335,18 +1832,33 @@ ENTRY(paranoid_exit)
24803 DEFAULT_FRAME
24804 DISABLE_INTERRUPTS(CLBR_NONE)
24805 TRACE_IRQS_OFF_DEBUG
24806- testl %ebx,%ebx /* swapgs needed? */
24807+ testl $1,%ebx /* swapgs needed? */
24808 jnz paranoid_restore
24809- testl $3,CS(%rsp)
24810+ testb $3,CS(%rsp)
24811 jnz paranoid_userspace
24812+#ifdef CONFIG_PAX_MEMORY_UDEREF
24813+ pax_exit_kernel
24814+ TRACE_IRQS_IRETQ 0
24815+ SWAPGS_UNSAFE_STACK
24816+ RESTORE_ALL 8
24817+ pax_force_retaddr_bts
24818+ jmp irq_return
24819+#endif
24820 paranoid_swapgs:
24821+#ifdef CONFIG_PAX_MEMORY_UDEREF
24822+ pax_exit_kernel_user
24823+#else
24824+ pax_exit_kernel
24825+#endif
24826 TRACE_IRQS_IRETQ 0
24827 SWAPGS_UNSAFE_STACK
24828 RESTORE_ALL 8
24829 jmp irq_return
24830 paranoid_restore:
24831+ pax_exit_kernel
24832 TRACE_IRQS_IRETQ_DEBUG 0
24833 RESTORE_ALL 8
24834+ pax_force_retaddr_bts
24835 jmp irq_return
24836 paranoid_userspace:
24837 GET_THREAD_INFO(%rcx)
24838@@ -1375,7 +1887,7 @@ paranoid_schedule:
24839 TRACE_IRQS_OFF
24840 jmp paranoid_userspace
24841 CFI_ENDPROC
24842-END(paranoid_exit)
24843+ENDPROC(paranoid_exit)
24844
24845 /*
24846 * Exception entry point. This expects an error code/orig_rax on the stack.
24847@@ -1402,12 +1914,23 @@ ENTRY(error_entry)
24848 movq %r14, R14+8(%rsp)
24849 movq %r15, R15+8(%rsp)
24850 xorl %ebx,%ebx
24851- testl $3,CS+8(%rsp)
24852+ testb $3,CS+8(%rsp)
24853 je error_kernelspace
24854 error_swapgs:
24855 SWAPGS
24856 error_sti:
24857+#ifdef CONFIG_PAX_MEMORY_UDEREF
24858+ testb $3, CS+8(%rsp)
24859+ jnz 1f
24860+ pax_enter_kernel
24861+ jmp 2f
24862+1: pax_enter_kernel_user
24863+2:
24864+#else
24865+ pax_enter_kernel
24866+#endif
24867 TRACE_IRQS_OFF
24868+ pax_force_retaddr
24869 ret
24870
24871 /*
24872@@ -1435,7 +1958,7 @@ bstep_iret:
24873 movq %rcx,RIP+8(%rsp)
24874 jmp error_swapgs
24875 CFI_ENDPROC
24876-END(error_entry)
24877+ENDPROC(error_entry)
24878
24879
24880 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24881@@ -1446,7 +1969,7 @@ ENTRY(error_exit)
24882 DISABLE_INTERRUPTS(CLBR_NONE)
24883 TRACE_IRQS_OFF
24884 GET_THREAD_INFO(%rcx)
24885- testl %eax,%eax
24886+ testl $1,%eax
24887 jne retint_kernel
24888 LOCKDEP_SYS_EXIT_IRQ
24889 movl TI_flags(%rcx),%edx
24890@@ -1455,7 +1978,7 @@ ENTRY(error_exit)
24891 jnz retint_careful
24892 jmp retint_swapgs
24893 CFI_ENDPROC
24894-END(error_exit)
24895+ENDPROC(error_exit)
24896
24897 /*
24898 * Test if a given stack is an NMI stack or not.
24899@@ -1513,9 +2036,11 @@ ENTRY(nmi)
24900 * If %cs was not the kernel segment, then the NMI triggered in user
24901 * space, which means it is definitely not nested.
24902 */
24903+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24904+ je 1f
24905 cmpl $__KERNEL_CS, 16(%rsp)
24906 jne first_nmi
24907-
24908+1:
24909 /*
24910 * Check the special variable on the stack to see if NMIs are
24911 * executing.
24912@@ -1549,8 +2074,7 @@ nested_nmi:
24913
24914 1:
24915 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24916- leaq -1*8(%rsp), %rdx
24917- movq %rdx, %rsp
24918+ subq $8, %rsp
24919 CFI_ADJUST_CFA_OFFSET 1*8
24920 leaq -10*8(%rsp), %rdx
24921 pushq_cfi $__KERNEL_DS
24922@@ -1568,6 +2092,7 @@ nested_nmi_out:
24923 CFI_RESTORE rdx
24924
24925 /* No need to check faults here */
24926+# pax_force_retaddr_bts
24927 INTERRUPT_RETURN
24928
24929 CFI_RESTORE_STATE
24930@@ -1664,13 +2189,13 @@ end_repeat_nmi:
24931 subq $ORIG_RAX-R15, %rsp
24932 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24933 /*
24934- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24935+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24936 * as we should not be calling schedule in NMI context.
24937 * Even with normal interrupts enabled. An NMI should not be
24938 * setting NEED_RESCHED or anything that normal interrupts and
24939 * exceptions might do.
24940 */
24941- call save_paranoid
24942+ call save_paranoid_nmi
24943 DEFAULT_FRAME 0
24944
24945 /*
24946@@ -1680,9 +2205,9 @@ end_repeat_nmi:
24947 * NMI itself takes a page fault, the page fault that was preempted
24948 * will read the information from the NMI page fault and not the
24949 * origin fault. Save it off and restore it if it changes.
24950- * Use the r12 callee-saved register.
24951+ * Use the r13 callee-saved register.
24952 */
24953- movq %cr2, %r12
24954+ movq %cr2, %r13
24955
24956 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24957 movq %rsp,%rdi
24958@@ -1691,29 +2216,34 @@ end_repeat_nmi:
24959
24960 /* Did the NMI take a page fault? Restore cr2 if it did */
24961 movq %cr2, %rcx
24962- cmpq %rcx, %r12
24963+ cmpq %rcx, %r13
24964 je 1f
24965- movq %r12, %cr2
24966+ movq %r13, %cr2
24967 1:
24968
24969- testl %ebx,%ebx /* swapgs needed? */
24970+ testl $1,%ebx /* swapgs needed? */
24971 jnz nmi_restore
24972 nmi_swapgs:
24973 SWAPGS_UNSAFE_STACK
24974 nmi_restore:
24975+ pax_exit_kernel_nmi
24976 /* Pop the extra iret frame at once */
24977 RESTORE_ALL 6*8
24978+ testb $3, 8(%rsp)
24979+ jnz 1f
24980+ pax_force_retaddr_bts
24981+1:
24982
24983 /* Clear the NMI executing stack variable */
24984 movq $0, 5*8(%rsp)
24985 jmp irq_return
24986 CFI_ENDPROC
24987-END(nmi)
24988+ENDPROC(nmi)
24989
24990 ENTRY(ignore_sysret)
24991 CFI_STARTPROC
24992 mov $-ENOSYS,%eax
24993 sysret
24994 CFI_ENDPROC
24995-END(ignore_sysret)
24996+ENDPROC(ignore_sysret)
24997
24998diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24999index 94d857f..bf1f0bf 100644
25000--- a/arch/x86/kernel/espfix_64.c
25001+++ b/arch/x86/kernel/espfix_64.c
25002@@ -197,7 +197,7 @@ void init_espfix_ap(void)
25003 set_pte(&pte_p[n*PTE_STRIDE], pte);
25004
25005 /* Job is done for this CPU and any CPU which shares this page */
25006- ACCESS_ONCE(espfix_pages[page]) = stack_page;
25007+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
25008
25009 unlock_done:
25010 mutex_unlock(&espfix_init_mutex);
25011diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
25012index 3386dc9..28bdf81 100644
25013--- a/arch/x86/kernel/ftrace.c
25014+++ b/arch/x86/kernel/ftrace.c
25015@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
25016 * kernel identity mapping to modify code.
25017 */
25018 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
25019- ip = (unsigned long)__va(__pa_symbol(ip));
25020+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
25021
25022 return ip;
25023 }
25024@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
25025 {
25026 unsigned char replaced[MCOUNT_INSN_SIZE];
25027
25028+ ip = ktla_ktva(ip);
25029+
25030 /*
25031 * Note: Due to modules and __init, code can
25032 * disappear and change, we need to protect against faulting
25033@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
25034 unsigned char old[MCOUNT_INSN_SIZE];
25035 int ret;
25036
25037- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
25038+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
25039
25040 ftrace_update_func = ip;
25041 /* Make sure the breakpoints see the ftrace_update_func update */
25042@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
25043 unsigned char replaced[MCOUNT_INSN_SIZE];
25044 unsigned char brk = BREAKPOINT_INSTRUCTION;
25045
25046- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
25047+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
25048 return -EFAULT;
25049
25050 /* Make sure it is what we expect it to be */
25051diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
25052index eda1a86..8f6df48 100644
25053--- a/arch/x86/kernel/head64.c
25054+++ b/arch/x86/kernel/head64.c
25055@@ -67,12 +67,12 @@ again:
25056 pgd = *pgd_p;
25057
25058 /*
25059- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
25060- * critical -- __PAGE_OFFSET would point us back into the dynamic
25061+ * The use of __early_va rather than __va here is critical:
25062+ * __va would point us back into the dynamic
25063 * range and we might end up looping forever...
25064 */
25065 if (pgd)
25066- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25067+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
25068 else {
25069 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25070 reset_early_page_tables();
25071@@ -82,13 +82,13 @@ again:
25072 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
25073 for (i = 0; i < PTRS_PER_PUD; i++)
25074 pud_p[i] = 0;
25075- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25076+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
25077 }
25078 pud_p += pud_index(address);
25079 pud = *pud_p;
25080
25081 if (pud)
25082- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25083+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
25084 else {
25085 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25086 reset_early_page_tables();
25087@@ -98,7 +98,7 @@ again:
25088 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
25089 for (i = 0; i < PTRS_PER_PMD; i++)
25090 pmd_p[i] = 0;
25091- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25092+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
25093 }
25094 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
25095 pmd_p[pmd_index(address)] = pmd;
25096@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
25097 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
25098 early_printk("Kernel alive\n");
25099
25100- clear_page(init_level4_pgt);
25101 /* set init_level4_pgt kernel high mapping*/
25102 init_level4_pgt[511] = early_level4_pgt[511];
25103
25104diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
25105index f36bd42..0ab4474 100644
25106--- a/arch/x86/kernel/head_32.S
25107+++ b/arch/x86/kernel/head_32.S
25108@@ -26,6 +26,12 @@
25109 /* Physical address */
25110 #define pa(X) ((X) - __PAGE_OFFSET)
25111
25112+#ifdef CONFIG_PAX_KERNEXEC
25113+#define ta(X) (X)
25114+#else
25115+#define ta(X) ((X) - __PAGE_OFFSET)
25116+#endif
25117+
25118 /*
25119 * References to members of the new_cpu_data structure.
25120 */
25121@@ -55,11 +61,7 @@
25122 * and small than max_low_pfn, otherwise will waste some page table entries
25123 */
25124
25125-#if PTRS_PER_PMD > 1
25126-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
25127-#else
25128-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
25129-#endif
25130+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
25131
25132 /* Number of possible pages in the lowmem region */
25133 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
25134@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
25135 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25136
25137 /*
25138+ * Real beginning of normal "text" segment
25139+ */
25140+ENTRY(stext)
25141+ENTRY(_stext)
25142+
25143+/*
25144 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
25145 * %esi points to the real-mode code as a 32-bit pointer.
25146 * CS and DS must be 4 GB flat segments, but we don't depend on
25147@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25148 * can.
25149 */
25150 __HEAD
25151+
25152+#ifdef CONFIG_PAX_KERNEXEC
25153+ jmp startup_32
25154+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
25155+.fill PAGE_SIZE-5,1,0xcc
25156+#endif
25157+
25158 ENTRY(startup_32)
25159 movl pa(stack_start),%ecx
25160
25161@@ -106,6 +121,59 @@ ENTRY(startup_32)
25162 2:
25163 leal -__PAGE_OFFSET(%ecx),%esp
25164
25165+#ifdef CONFIG_SMP
25166+ movl $pa(cpu_gdt_table),%edi
25167+ movl $__per_cpu_load,%eax
25168+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
25169+ rorl $16,%eax
25170+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
25171+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
25172+ movl $__per_cpu_end - 1,%eax
25173+ subl $__per_cpu_start,%eax
25174+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
25175+#endif
25176+
25177+#ifdef CONFIG_PAX_MEMORY_UDEREF
25178+ movl $NR_CPUS,%ecx
25179+ movl $pa(cpu_gdt_table),%edi
25180+1:
25181+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
25182+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
25183+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
25184+ addl $PAGE_SIZE_asm,%edi
25185+ loop 1b
25186+#endif
25187+
25188+#ifdef CONFIG_PAX_KERNEXEC
25189+ movl $pa(boot_gdt),%edi
25190+ movl $__LOAD_PHYSICAL_ADDR,%eax
25191+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
25192+ rorl $16,%eax
25193+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
25194+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
25195+ rorl $16,%eax
25196+
25197+ ljmp $(__BOOT_CS),$1f
25198+1:
25199+
25200+ movl $NR_CPUS,%ecx
25201+ movl $pa(cpu_gdt_table),%edi
25202+ addl $__PAGE_OFFSET,%eax
25203+1:
25204+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
25205+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
25206+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
25207+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
25208+ rorl $16,%eax
25209+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
25210+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
25211+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
25212+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
25213+ rorl $16,%eax
25214+ addl $PAGE_SIZE_asm,%edi
25215+ loop 1b
25216+#endif
25217+
25218 /*
25219 * Clear BSS first so that there are no surprises...
25220 */
25221@@ -201,8 +269,11 @@ ENTRY(startup_32)
25222 movl %eax, pa(max_pfn_mapped)
25223
25224 /* Do early initialization of the fixmap area */
25225- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25226- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
25227+#ifdef CONFIG_COMPAT_VDSO
25228+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
25229+#else
25230+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
25231+#endif
25232 #else /* Not PAE */
25233
25234 page_pde_offset = (__PAGE_OFFSET >> 20);
25235@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25236 movl %eax, pa(max_pfn_mapped)
25237
25238 /* Do early initialization of the fixmap area */
25239- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25240- movl %eax,pa(initial_page_table+0xffc)
25241+#ifdef CONFIG_COMPAT_VDSO
25242+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
25243+#else
25244+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
25245+#endif
25246 #endif
25247
25248 #ifdef CONFIG_PARAVIRT
25249@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25250 cmpl $num_subarch_entries, %eax
25251 jae bad_subarch
25252
25253- movl pa(subarch_entries)(,%eax,4), %eax
25254- subl $__PAGE_OFFSET, %eax
25255- jmp *%eax
25256+ jmp *pa(subarch_entries)(,%eax,4)
25257
25258 bad_subarch:
25259 WEAK(lguest_entry)
25260@@ -261,10 +333,10 @@ WEAK(xen_entry)
25261 __INITDATA
25262
25263 subarch_entries:
25264- .long default_entry /* normal x86/PC */
25265- .long lguest_entry /* lguest hypervisor */
25266- .long xen_entry /* Xen hypervisor */
25267- .long default_entry /* Moorestown MID */
25268+ .long ta(default_entry) /* normal x86/PC */
25269+ .long ta(lguest_entry) /* lguest hypervisor */
25270+ .long ta(xen_entry) /* Xen hypervisor */
25271+ .long ta(default_entry) /* Moorestown MID */
25272 num_subarch_entries = (. - subarch_entries) / 4
25273 .previous
25274 #else
25275@@ -354,6 +426,7 @@ default_entry:
25276 movl pa(mmu_cr4_features),%eax
25277 movl %eax,%cr4
25278
25279+#ifdef CONFIG_X86_PAE
25280 testb $X86_CR4_PAE, %al # check if PAE is enabled
25281 jz enable_paging
25282
25283@@ -382,6 +455,9 @@ default_entry:
25284 /* Make changes effective */
25285 wrmsr
25286
25287+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
25288+#endif
25289+
25290 enable_paging:
25291
25292 /*
25293@@ -449,14 +525,20 @@ is486:
25294 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
25295 movl %eax,%ss # after changing gdt.
25296
25297- movl $(__USER_DS),%eax # DS/ES contains default USER segment
25298+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
25299 movl %eax,%ds
25300 movl %eax,%es
25301
25302 movl $(__KERNEL_PERCPU), %eax
25303 movl %eax,%fs # set this cpu's percpu
25304
25305+#ifdef CONFIG_CC_STACKPROTECTOR
25306 movl $(__KERNEL_STACK_CANARY),%eax
25307+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
25308+ movl $(__USER_DS),%eax
25309+#else
25310+ xorl %eax,%eax
25311+#endif
25312 movl %eax,%gs
25313
25314 xorl %eax,%eax # Clear LDT
25315@@ -512,8 +594,11 @@ setup_once:
25316 * relocation. Manually set base address in stack canary
25317 * segment descriptor.
25318 */
25319- movl $gdt_page,%eax
25320+ movl $cpu_gdt_table,%eax
25321 movl $stack_canary,%ecx
25322+#ifdef CONFIG_SMP
25323+ addl $__per_cpu_load,%ecx
25324+#endif
25325 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
25326 shrl $16, %ecx
25327 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
25328@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
25329 cmpl $2,(%esp) # X86_TRAP_NMI
25330 je is_nmi # Ignore NMI
25331
25332- cmpl $2,%ss:early_recursion_flag
25333+ cmpl $1,%ss:early_recursion_flag
25334 je hlt_loop
25335 incl %ss:early_recursion_flag
25336
25337@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
25338 pushl (20+6*4)(%esp) /* trapno */
25339 pushl $fault_msg
25340 call printk
25341-#endif
25342 call dump_stack
25343+#endif
25344 hlt_loop:
25345 hlt
25346 jmp hlt_loop
25347@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
25348 /* This is the default interrupt "handler" :-) */
25349 ALIGN
25350 ignore_int:
25351- cld
25352 #ifdef CONFIG_PRINTK
25353+ cmpl $2,%ss:early_recursion_flag
25354+ je hlt_loop
25355+ incl %ss:early_recursion_flag
25356+ cld
25357 pushl %eax
25358 pushl %ecx
25359 pushl %edx
25360@@ -617,9 +705,6 @@ ignore_int:
25361 movl $(__KERNEL_DS),%eax
25362 movl %eax,%ds
25363 movl %eax,%es
25364- cmpl $2,early_recursion_flag
25365- je hlt_loop
25366- incl early_recursion_flag
25367 pushl 16(%esp)
25368 pushl 24(%esp)
25369 pushl 32(%esp)
25370@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
25371 /*
25372 * BSS section
25373 */
25374-__PAGE_ALIGNED_BSS
25375- .align PAGE_SIZE
25376 #ifdef CONFIG_X86_PAE
25377+.section .initial_pg_pmd,"a",@progbits
25378 initial_pg_pmd:
25379 .fill 1024*KPMDS,4,0
25380 #else
25381+.section .initial_page_table,"a",@progbits
25382 ENTRY(initial_page_table)
25383 .fill 1024,4,0
25384 #endif
25385+.section .initial_pg_fixmap,"a",@progbits
25386 initial_pg_fixmap:
25387 .fill 1024,4,0
25388+.section .empty_zero_page,"a",@progbits
25389 ENTRY(empty_zero_page)
25390 .fill 4096,1,0
25391+.section .swapper_pg_dir,"a",@progbits
25392 ENTRY(swapper_pg_dir)
25393+#ifdef CONFIG_X86_PAE
25394+ .fill 4,8,0
25395+#else
25396 .fill 1024,4,0
25397+#endif
25398
25399 /*
25400 * This starts the data section.
25401 */
25402 #ifdef CONFIG_X86_PAE
25403-__PAGE_ALIGNED_DATA
25404- /* Page-aligned for the benefit of paravirt? */
25405- .align PAGE_SIZE
25406+.section .initial_page_table,"a",@progbits
25407 ENTRY(initial_page_table)
25408 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
25409 # if KPMDS == 3
25410@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
25411 # error "Kernel PMDs should be 1, 2 or 3"
25412 # endif
25413 .align PAGE_SIZE /* needs to be page-sized too */
25414+
25415+#ifdef CONFIG_PAX_PER_CPU_PGD
25416+ENTRY(cpu_pgd)
25417+ .rept 2*NR_CPUS
25418+ .fill 4,8,0
25419+ .endr
25420+#endif
25421+
25422 #endif
25423
25424 .data
25425 .balign 4
25426 ENTRY(stack_start)
25427- .long init_thread_union+THREAD_SIZE
25428+ .long init_thread_union+THREAD_SIZE-8
25429
25430 __INITRODATA
25431 int_msg:
25432@@ -727,7 +825,7 @@ fault_msg:
25433 * segment size, and 32-bit linear address value:
25434 */
25435
25436- .data
25437+.section .rodata,"a",@progbits
25438 .globl boot_gdt_descr
25439 .globl idt_descr
25440
25441@@ -736,7 +834,7 @@ fault_msg:
25442 .word 0 # 32 bit align gdt_desc.address
25443 boot_gdt_descr:
25444 .word __BOOT_DS+7
25445- .long boot_gdt - __PAGE_OFFSET
25446+ .long pa(boot_gdt)
25447
25448 .word 0 # 32-bit align idt_desc.address
25449 idt_descr:
25450@@ -747,7 +845,7 @@ idt_descr:
25451 .word 0 # 32 bit align gdt_desc.address
25452 ENTRY(early_gdt_descr)
25453 .word GDT_ENTRIES*8-1
25454- .long gdt_page /* Overwritten for secondary CPUs */
25455+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
25456
25457 /*
25458 * The boot_gdt must mirror the equivalent in setup.S and is
25459@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
25460 .align L1_CACHE_BYTES
25461 ENTRY(boot_gdt)
25462 .fill GDT_ENTRY_BOOT_CS,8,0
25463- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
25464- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
25465+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
25466+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
25467+
25468+ .align PAGE_SIZE_asm
25469+ENTRY(cpu_gdt_table)
25470+ .rept NR_CPUS
25471+ .quad 0x0000000000000000 /* NULL descriptor */
25472+ .quad 0x0000000000000000 /* 0x0b reserved */
25473+ .quad 0x0000000000000000 /* 0x13 reserved */
25474+ .quad 0x0000000000000000 /* 0x1b reserved */
25475+
25476+#ifdef CONFIG_PAX_KERNEXEC
25477+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
25478+#else
25479+ .quad 0x0000000000000000 /* 0x20 unused */
25480+#endif
25481+
25482+ .quad 0x0000000000000000 /* 0x28 unused */
25483+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
25484+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
25485+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
25486+ .quad 0x0000000000000000 /* 0x4b reserved */
25487+ .quad 0x0000000000000000 /* 0x53 reserved */
25488+ .quad 0x0000000000000000 /* 0x5b reserved */
25489+
25490+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
25491+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
25492+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
25493+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
25494+
25495+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25496+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25497+
25498+ /*
25499+ * Segments used for calling PnP BIOS have byte granularity.
25500+ * The code segments and data segments have fixed 64k limits,
25501+ * the transfer segment sizes are set at run time.
25502+ */
25503+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
25504+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
25505+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
25506+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
25507+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
25508+
25509+ /*
25510+ * The APM segments have byte granularity and their bases
25511+ * are set at run time. All have 64k limits.
25512+ */
25513+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25514+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25515+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
25516+
25517+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25518+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25519+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25520+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25521+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25522+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25523+
25524+ /* Be sure this is zeroed to avoid false validations in Xen */
25525+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25526+ .endr
25527diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25528index a468c0a..8b5a879 100644
25529--- a/arch/x86/kernel/head_64.S
25530+++ b/arch/x86/kernel/head_64.S
25531@@ -20,6 +20,8 @@
25532 #include <asm/processor-flags.h>
25533 #include <asm/percpu.h>
25534 #include <asm/nops.h>
25535+#include <asm/cpufeature.h>
25536+#include <asm/alternative-asm.h>
25537
25538 #ifdef CONFIG_PARAVIRT
25539 #include <asm/asm-offsets.h>
25540@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25541 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25542 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25543 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25544+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25545+L3_VMALLOC_START = pud_index(VMALLOC_START)
25546+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25547+L3_VMALLOC_END = pud_index(VMALLOC_END)
25548+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25549+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25550
25551 .text
25552 __HEAD
25553@@ -89,11 +97,24 @@ startup_64:
25554 * Fixup the physical addresses in the page table
25555 */
25556 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25557+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25558+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25559+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25560+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25561+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25562
25563- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25564- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25565+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25566+#ifndef CONFIG_XEN
25567+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25568+#endif
25569+
25570+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25571+
25572+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25573+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25574
25575 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25576+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25577
25578 /*
25579 * Set up the identity mapping for the switchover. These
25580@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
25581 * after the boot processor executes this code.
25582 */
25583
25584+ orq $-1, %rbp
25585 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25586 1:
25587
25588- /* Enable PAE mode and PGE */
25589- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25590+ /* Enable PAE mode and PSE/PGE */
25591+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25592 movq %rcx, %cr4
25593
25594 /* Setup early boot stage 4 level pagetables. */
25595@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
25596 movl $MSR_EFER, %ecx
25597 rdmsr
25598 btsl $_EFER_SCE, %eax /* Enable System Call */
25599- btl $20,%edi /* No Execute supported? */
25600+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25601 jnc 1f
25602 btsl $_EFER_NX, %eax
25603+ cmpq $-1, %rbp
25604+ je 1f
25605 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25606+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25607+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25608+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25609+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25610+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25611+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25612+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25613 1: wrmsr /* Make changes effective */
25614
25615 /* Setup cr0 */
25616@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25617 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25618 * address given in m16:64.
25619 */
25620+ pax_set_fptr_mask
25621 movq initial_code(%rip),%rax
25622 pushq $0 # fake return address to stop unwinder
25623 pushq $__KERNEL_CS # set correct cs
25624@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25625 .quad INIT_PER_CPU_VAR(irq_stack_union)
25626
25627 GLOBAL(stack_start)
25628- .quad init_thread_union+THREAD_SIZE-8
25629+ .quad init_thread_union+THREAD_SIZE-16
25630 .word 0
25631 __FINITDATA
25632
25633@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25634 call dump_stack
25635 #ifdef CONFIG_KALLSYMS
25636 leaq early_idt_ripmsg(%rip),%rdi
25637- movq 40(%rsp),%rsi # %rip again
25638+ movq 88(%rsp),%rsi # %rip again
25639 call __print_symbol
25640 #endif
25641 #endif /* EARLY_PRINTK */
25642@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25643 early_recursion_flag:
25644 .long 0
25645
25646+ .section .rodata,"a",@progbits
25647 #ifdef CONFIG_EARLY_PRINTK
25648 early_idt_msg:
25649 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25650@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25651 NEXT_PAGE(early_dynamic_pgts)
25652 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25653
25654- .data
25655+ .section .rodata,"a",@progbits
25656
25657-#ifndef CONFIG_XEN
25658 NEXT_PAGE(init_level4_pgt)
25659- .fill 512,8,0
25660-#else
25661-NEXT_PAGE(init_level4_pgt)
25662- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25663 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25664 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25665+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25666+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25667+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25668+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25669+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25670+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25671 .org init_level4_pgt + L4_START_KERNEL*8, 0
25672 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25673 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25674
25675+#ifdef CONFIG_PAX_PER_CPU_PGD
25676+NEXT_PAGE(cpu_pgd)
25677+ .rept 2*NR_CPUS
25678+ .fill 512,8,0
25679+ .endr
25680+#endif
25681+
25682 NEXT_PAGE(level3_ident_pgt)
25683 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25684+#ifdef CONFIG_XEN
25685 .fill 511, 8, 0
25686+#else
25687+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25688+ .fill 510,8,0
25689+#endif
25690+
25691+NEXT_PAGE(level3_vmalloc_start_pgt)
25692+ .fill 512,8,0
25693+
25694+NEXT_PAGE(level3_vmalloc_end_pgt)
25695+ .fill 512,8,0
25696+
25697+NEXT_PAGE(level3_vmemmap_pgt)
25698+ .fill L3_VMEMMAP_START,8,0
25699+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25700+
25701 NEXT_PAGE(level2_ident_pgt)
25702- /* Since I easily can, map the first 1G.
25703+ /* Since I easily can, map the first 2G.
25704 * Don't set NX because code runs from these pages.
25705 */
25706- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25707-#endif
25708+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25709
25710 NEXT_PAGE(level3_kernel_pgt)
25711 .fill L3_START_KERNEL,8,0
25712@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25713 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25714 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25715
25716+NEXT_PAGE(level2_vmemmap_pgt)
25717+ .fill 512,8,0
25718+
25719 NEXT_PAGE(level2_kernel_pgt)
25720 /*
25721 * 512 MB kernel mapping. We spend a full page on this pagetable
25722@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25723 NEXT_PAGE(level2_fixmap_pgt)
25724 .fill 506,8,0
25725 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25726- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25727- .fill 5,8,0
25728+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25729+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25730+ .fill 4,8,0
25731
25732 NEXT_PAGE(level1_fixmap_pgt)
25733 .fill 512,8,0
25734
25735+NEXT_PAGE(level1_vsyscall_pgt)
25736+ .fill 512,8,0
25737+
25738 #undef PMDS
25739
25740- .data
25741+ .align PAGE_SIZE
25742+ENTRY(cpu_gdt_table)
25743+ .rept NR_CPUS
25744+ .quad 0x0000000000000000 /* NULL descriptor */
25745+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25746+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25747+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25748+ .quad 0x00cffb000000ffff /* __USER32_CS */
25749+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25750+ .quad 0x00affb000000ffff /* __USER_CS */
25751+
25752+#ifdef CONFIG_PAX_KERNEXEC
25753+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25754+#else
25755+ .quad 0x0 /* unused */
25756+#endif
25757+
25758+ .quad 0,0 /* TSS */
25759+ .quad 0,0 /* LDT */
25760+ .quad 0,0,0 /* three TLS descriptors */
25761+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25762+ /* asm/segment.h:GDT_ENTRIES must match this */
25763+
25764+#ifdef CONFIG_PAX_MEMORY_UDEREF
25765+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25766+#else
25767+ .quad 0x0 /* unused */
25768+#endif
25769+
25770+ /* zero the remaining page */
25771+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25772+ .endr
25773+
25774 .align 16
25775 .globl early_gdt_descr
25776 early_gdt_descr:
25777 .word GDT_ENTRIES*8-1
25778 early_gdt_descr_base:
25779- .quad INIT_PER_CPU_VAR(gdt_page)
25780+ .quad cpu_gdt_table
25781
25782 ENTRY(phys_base)
25783 /* This must match the first entry in level2_kernel_pgt */
25784 .quad 0x0000000000000000
25785
25786 #include "../../x86/xen/xen-head.S"
25787-
25788- __PAGE_ALIGNED_BSS
25789+
25790+ .section .rodata,"a",@progbits
25791 NEXT_PAGE(empty_zero_page)
25792 .skip PAGE_SIZE
25793diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25794index 05fd74f..c3548b1 100644
25795--- a/arch/x86/kernel/i386_ksyms_32.c
25796+++ b/arch/x86/kernel/i386_ksyms_32.c
25797@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25798 EXPORT_SYMBOL(cmpxchg8b_emu);
25799 #endif
25800
25801+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25802+
25803 /* Networking helper routines. */
25804 EXPORT_SYMBOL(csum_partial_copy_generic);
25805+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25806+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25807
25808 EXPORT_SYMBOL(__get_user_1);
25809 EXPORT_SYMBOL(__get_user_2);
25810@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25811 EXPORT_SYMBOL(___preempt_schedule_context);
25812 #endif
25813 #endif
25814+
25815+#ifdef CONFIG_PAX_KERNEXEC
25816+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25817+#endif
25818+
25819+#ifdef CONFIG_PAX_PER_CPU_PGD
25820+EXPORT_SYMBOL(cpu_pgd);
25821+#endif
25822diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25823index a9a4229..6f4d476 100644
25824--- a/arch/x86/kernel/i387.c
25825+++ b/arch/x86/kernel/i387.c
25826@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25827 static inline bool interrupted_user_mode(void)
25828 {
25829 struct pt_regs *regs = get_irq_regs();
25830- return regs && user_mode_vm(regs);
25831+ return regs && user_mode(regs);
25832 }
25833
25834 /*
25835diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25836index 8af8171..f8c1169 100644
25837--- a/arch/x86/kernel/i8259.c
25838+++ b/arch/x86/kernel/i8259.c
25839@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25840 static void make_8259A_irq(unsigned int irq)
25841 {
25842 disable_irq_nosync(irq);
25843- io_apic_irqs &= ~(1<<irq);
25844+ io_apic_irqs &= ~(1UL<<irq);
25845 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25846 i8259A_chip.name);
25847 enable_irq(irq);
25848@@ -209,7 +209,7 @@ spurious_8259A_irq:
25849 "spurious 8259A interrupt: IRQ%d.\n", irq);
25850 spurious_irq_mask |= irqmask;
25851 }
25852- atomic_inc(&irq_err_count);
25853+ atomic_inc_unchecked(&irq_err_count);
25854 /*
25855 * Theoretically we do not have to handle this IRQ,
25856 * but in Linux this does not cause problems and is
25857@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25858 /* (slave's support for AEOI in flat mode is to be investigated) */
25859 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25860
25861+ pax_open_kernel();
25862 if (auto_eoi)
25863 /*
25864 * In AEOI mode we just have to mask the interrupt
25865 * when acking.
25866 */
25867- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25868+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25869 else
25870- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25871+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25872+ pax_close_kernel();
25873
25874 udelay(100); /* wait for 8259A to initialize */
25875
25876diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25877index a979b5b..1d6db75 100644
25878--- a/arch/x86/kernel/io_delay.c
25879+++ b/arch/x86/kernel/io_delay.c
25880@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25881 * Quirk table for systems that misbehave (lock up, etc.) if port
25882 * 0x80 is used:
25883 */
25884-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25885+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25886 {
25887 .callback = dmi_io_delay_0xed_port,
25888 .ident = "Compaq Presario V6000",
25889diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25890index 4ddaf66..49d5c18 100644
25891--- a/arch/x86/kernel/ioport.c
25892+++ b/arch/x86/kernel/ioport.c
25893@@ -6,6 +6,7 @@
25894 #include <linux/sched.h>
25895 #include <linux/kernel.h>
25896 #include <linux/capability.h>
25897+#include <linux/security.h>
25898 #include <linux/errno.h>
25899 #include <linux/types.h>
25900 #include <linux/ioport.h>
25901@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25902 return -EINVAL;
25903 if (turn_on && !capable(CAP_SYS_RAWIO))
25904 return -EPERM;
25905+#ifdef CONFIG_GRKERNSEC_IO
25906+ if (turn_on && grsec_disable_privio) {
25907+ gr_handle_ioperm();
25908+ return -ENODEV;
25909+ }
25910+#endif
25911
25912 /*
25913 * If it's the first ioperm() call in this thread's lifetime, set the
25914@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25915 * because the ->io_bitmap_max value must match the bitmap
25916 * contents:
25917 */
25918- tss = &per_cpu(init_tss, get_cpu());
25919+ tss = init_tss + get_cpu();
25920
25921 if (turn_on)
25922 bitmap_clear(t->io_bitmap_ptr, from, num);
25923@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25924 if (level > old) {
25925 if (!capable(CAP_SYS_RAWIO))
25926 return -EPERM;
25927+#ifdef CONFIG_GRKERNSEC_IO
25928+ if (grsec_disable_privio) {
25929+ gr_handle_iopl();
25930+ return -ENODEV;
25931+ }
25932+#endif
25933 }
25934 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25935 t->iopl = level << 12;
25936diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25937index 922d285..6d20692 100644
25938--- a/arch/x86/kernel/irq.c
25939+++ b/arch/x86/kernel/irq.c
25940@@ -22,7 +22,7 @@
25941 #define CREATE_TRACE_POINTS
25942 #include <asm/trace/irq_vectors.h>
25943
25944-atomic_t irq_err_count;
25945+atomic_unchecked_t irq_err_count;
25946
25947 /* Function pointer for generic interrupt vector handling */
25948 void (*x86_platform_ipi_callback)(void) = NULL;
25949@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25950 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25951 seq_printf(p, " Hypervisor callback interrupts\n");
25952 #endif
25953- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25954+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25955 #if defined(CONFIG_X86_IO_APIC)
25956- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25957+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25958 #endif
25959 return 0;
25960 }
25961@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25962
25963 u64 arch_irq_stat(void)
25964 {
25965- u64 sum = atomic_read(&irq_err_count);
25966+ u64 sum = atomic_read_unchecked(&irq_err_count);
25967 return sum;
25968 }
25969
25970diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25971index 63ce838..2ea3e06 100644
25972--- a/arch/x86/kernel/irq_32.c
25973+++ b/arch/x86/kernel/irq_32.c
25974@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25975
25976 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25977
25978+extern void gr_handle_kernel_exploit(void);
25979+
25980 int sysctl_panic_on_stackoverflow __read_mostly;
25981
25982 /* Debugging check for stack overflow: is there less than 1KB free? */
25983@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25984 __asm__ __volatile__("andl %%esp,%0" :
25985 "=r" (sp) : "0" (THREAD_SIZE - 1));
25986
25987- return sp < (sizeof(struct thread_info) + STACK_WARN);
25988+ return sp < STACK_WARN;
25989 }
25990
25991 static void print_stack_overflow(void)
25992 {
25993 printk(KERN_WARNING "low stack detected by irq handler\n");
25994 dump_stack();
25995+ gr_handle_kernel_exploit();
25996 if (sysctl_panic_on_stackoverflow)
25997 panic("low stack detected by irq handler - check messages\n");
25998 }
25999@@ -84,10 +87,9 @@ static inline void *current_stack(void)
26000 static inline int
26001 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26002 {
26003- struct irq_stack *curstk, *irqstk;
26004+ struct irq_stack *irqstk;
26005 u32 *isp, *prev_esp, arg1, arg2;
26006
26007- curstk = (struct irq_stack *) current_stack();
26008 irqstk = __this_cpu_read(hardirq_stack);
26009
26010 /*
26011@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26012 * handler) we can't do that and just have to keep using the
26013 * current stack (which is the irq stack already after all)
26014 */
26015- if (unlikely(curstk == irqstk))
26016+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
26017 return 0;
26018
26019- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
26020+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
26021
26022 /* Save the next esp at the bottom of the stack */
26023 prev_esp = (u32 *)irqstk;
26024 *prev_esp = current_stack_pointer;
26025
26026+#ifdef CONFIG_PAX_MEMORY_UDEREF
26027+ __set_fs(MAKE_MM_SEG(0));
26028+#endif
26029+
26030 if (unlikely(overflow))
26031 call_on_stack(print_stack_overflow, isp);
26032
26033@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26034 : "0" (irq), "1" (desc), "2" (isp),
26035 "D" (desc->handle_irq)
26036 : "memory", "cc", "ecx");
26037+
26038+#ifdef CONFIG_PAX_MEMORY_UDEREF
26039+ __set_fs(current_thread_info()->addr_limit);
26040+#endif
26041+
26042 return 1;
26043 }
26044
26045@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26046 */
26047 void irq_ctx_init(int cpu)
26048 {
26049- struct irq_stack *irqstk;
26050-
26051 if (per_cpu(hardirq_stack, cpu))
26052 return;
26053
26054- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26055- THREADINFO_GFP,
26056- THREAD_SIZE_ORDER));
26057- per_cpu(hardirq_stack, cpu) = irqstk;
26058-
26059- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26060- THREADINFO_GFP,
26061- THREAD_SIZE_ORDER));
26062- per_cpu(softirq_stack, cpu) = irqstk;
26063-
26064- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
26065- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
26066+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26067+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26068 }
26069
26070 void do_softirq_own_stack(void)
26071 {
26072- struct thread_info *curstk;
26073 struct irq_stack *irqstk;
26074 u32 *isp, *prev_esp;
26075
26076- curstk = current_stack();
26077 irqstk = __this_cpu_read(softirq_stack);
26078
26079 /* build the stack frame on the softirq stack */
26080@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
26081 prev_esp = (u32 *)irqstk;
26082 *prev_esp = current_stack_pointer;
26083
26084+#ifdef CONFIG_PAX_MEMORY_UDEREF
26085+ __set_fs(MAKE_MM_SEG(0));
26086+#endif
26087+
26088 call_on_stack(__do_softirq, isp);
26089+
26090+#ifdef CONFIG_PAX_MEMORY_UDEREF
26091+ __set_fs(current_thread_info()->addr_limit);
26092+#endif
26093+
26094 }
26095
26096 bool handle_irq(unsigned irq, struct pt_regs *regs)
26097@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
26098 if (unlikely(!desc))
26099 return false;
26100
26101- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26102+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26103 if (unlikely(overflow))
26104 print_stack_overflow();
26105 desc->handle_irq(irq, desc);
26106diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
26107index 4d1c746..55a22d6 100644
26108--- a/arch/x86/kernel/irq_64.c
26109+++ b/arch/x86/kernel/irq_64.c
26110@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
26111 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
26112 EXPORT_PER_CPU_SYMBOL(irq_regs);
26113
26114+extern void gr_handle_kernel_exploit(void);
26115+
26116 int sysctl_panic_on_stackoverflow;
26117
26118 /*
26119@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26120 u64 estack_top, estack_bottom;
26121 u64 curbase = (u64)task_stack_page(current);
26122
26123- if (user_mode_vm(regs))
26124+ if (user_mode(regs))
26125 return;
26126
26127 if (regs->sp >= curbase + sizeof(struct thread_info) +
26128@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26129 irq_stack_top, irq_stack_bottom,
26130 estack_top, estack_bottom);
26131
26132+ gr_handle_kernel_exploit();
26133+
26134 if (sysctl_panic_on_stackoverflow)
26135 panic("low stack detected by irq handler - check messages\n");
26136 #endif
26137diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
26138index 26d5a55..a01160a 100644
26139--- a/arch/x86/kernel/jump_label.c
26140+++ b/arch/x86/kernel/jump_label.c
26141@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26142 * Jump label is enabled for the first time.
26143 * So we expect a default_nop...
26144 */
26145- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
26146+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
26147 != 0))
26148 bug_at((void *)entry->code, __LINE__);
26149 } else {
26150@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26151 * ...otherwise expect an ideal_nop. Otherwise
26152 * something went horribly wrong.
26153 */
26154- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
26155+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
26156 != 0))
26157 bug_at((void *)entry->code, __LINE__);
26158 }
26159@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
26160 * are converting the default nop to the ideal nop.
26161 */
26162 if (init) {
26163- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
26164+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
26165 bug_at((void *)entry->code, __LINE__);
26166 } else {
26167 code.jump = 0xe9;
26168 code.offset = entry->target -
26169 (entry->code + JUMP_LABEL_NOP_SIZE);
26170- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
26171+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
26172 bug_at((void *)entry->code, __LINE__);
26173 }
26174 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
26175diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
26176index 7ec1d5f..5a7d130 100644
26177--- a/arch/x86/kernel/kgdb.c
26178+++ b/arch/x86/kernel/kgdb.c
26179@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
26180 #ifdef CONFIG_X86_32
26181 switch (regno) {
26182 case GDB_SS:
26183- if (!user_mode_vm(regs))
26184+ if (!user_mode(regs))
26185 *(unsigned long *)mem = __KERNEL_DS;
26186 break;
26187 case GDB_SP:
26188- if (!user_mode_vm(regs))
26189+ if (!user_mode(regs))
26190 *(unsigned long *)mem = kernel_stack_pointer(regs);
26191 break;
26192 case GDB_GS:
26193@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
26194 bp->attr.bp_addr = breakinfo[breakno].addr;
26195 bp->attr.bp_len = breakinfo[breakno].len;
26196 bp->attr.bp_type = breakinfo[breakno].type;
26197- info->address = breakinfo[breakno].addr;
26198+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
26199+ info->address = ktla_ktva(breakinfo[breakno].addr);
26200+ else
26201+ info->address = breakinfo[breakno].addr;
26202 info->len = breakinfo[breakno].len;
26203 info->type = breakinfo[breakno].type;
26204 val = arch_install_hw_breakpoint(bp);
26205@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
26206 case 'k':
26207 /* clear the trace bit */
26208 linux_regs->flags &= ~X86_EFLAGS_TF;
26209- atomic_set(&kgdb_cpu_doing_single_step, -1);
26210+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
26211
26212 /* set the trace bit if we're stepping */
26213 if (remcomInBuffer[0] == 's') {
26214 linux_regs->flags |= X86_EFLAGS_TF;
26215- atomic_set(&kgdb_cpu_doing_single_step,
26216+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
26217 raw_smp_processor_id());
26218 }
26219
26220@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
26221
26222 switch (cmd) {
26223 case DIE_DEBUG:
26224- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
26225+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
26226 if (user_mode(regs))
26227 return single_step_cont(regs, args);
26228 break;
26229@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26230 #endif /* CONFIG_DEBUG_RODATA */
26231
26232 bpt->type = BP_BREAKPOINT;
26233- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
26234+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
26235 BREAK_INSTR_SIZE);
26236 if (err)
26237 return err;
26238- err = probe_kernel_write((char *)bpt->bpt_addr,
26239+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26240 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
26241 #ifdef CONFIG_DEBUG_RODATA
26242 if (!err)
26243@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26244 return -EBUSY;
26245 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
26246 BREAK_INSTR_SIZE);
26247- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26248+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26249 if (err)
26250 return err;
26251 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
26252@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
26253 if (mutex_is_locked(&text_mutex))
26254 goto knl_write;
26255 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
26256- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26257+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26258 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
26259 goto knl_write;
26260 return err;
26261 knl_write:
26262 #endif /* CONFIG_DEBUG_RODATA */
26263- return probe_kernel_write((char *)bpt->bpt_addr,
26264+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26265 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
26266 }
26267
26268diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
26269index 67e6d19..731ed28 100644
26270--- a/arch/x86/kernel/kprobes/core.c
26271+++ b/arch/x86/kernel/kprobes/core.c
26272@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
26273 s32 raddr;
26274 } __packed *insn;
26275
26276- insn = (struct __arch_relative_insn *)from;
26277+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
26278+
26279+ pax_open_kernel();
26280 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
26281 insn->op = op;
26282+ pax_close_kernel();
26283 }
26284
26285 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
26286@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
26287 kprobe_opcode_t opcode;
26288 kprobe_opcode_t *orig_opcodes = opcodes;
26289
26290- if (search_exception_tables((unsigned long)opcodes))
26291+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
26292 return 0; /* Page fault may occur on this address. */
26293
26294 retry:
26295@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
26296 * for the first byte, we can recover the original instruction
26297 * from it and kp->opcode.
26298 */
26299- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26300+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26301 buf[0] = kp->opcode;
26302- return (unsigned long)buf;
26303+ return ktva_ktla((unsigned long)buf);
26304 }
26305
26306 /*
26307@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26308 /* Another subsystem puts a breakpoint, failed to recover */
26309 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
26310 return 0;
26311+ pax_open_kernel();
26312 memcpy(dest, insn.kaddr, insn.length);
26313+ pax_close_kernel();
26314
26315 #ifdef CONFIG_X86_64
26316 if (insn_rip_relative(&insn)) {
26317@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26318 return 0;
26319 }
26320 disp = (u8 *) dest + insn_offset_displacement(&insn);
26321+ pax_open_kernel();
26322 *(s32 *) disp = (s32) newdisp;
26323+ pax_close_kernel();
26324 }
26325 #endif
26326 return insn.length;
26327@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26328 * nor set current_kprobe, because it doesn't use single
26329 * stepping.
26330 */
26331- regs->ip = (unsigned long)p->ainsn.insn;
26332+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26333 preempt_enable_no_resched();
26334 return;
26335 }
26336@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26337 regs->flags &= ~X86_EFLAGS_IF;
26338 /* single step inline if the instruction is an int3 */
26339 if (p->opcode == BREAKPOINT_INSTRUCTION)
26340- regs->ip = (unsigned long)p->addr;
26341+ regs->ip = ktla_ktva((unsigned long)p->addr);
26342 else
26343- regs->ip = (unsigned long)p->ainsn.insn;
26344+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26345 }
26346 NOKPROBE_SYMBOL(setup_singlestep);
26347
26348@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26349 struct kprobe *p;
26350 struct kprobe_ctlblk *kcb;
26351
26352- if (user_mode_vm(regs))
26353+ if (user_mode(regs))
26354 return 0;
26355
26356 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
26357@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26358 setup_singlestep(p, regs, kcb, 0);
26359 return 1;
26360 }
26361- } else if (*addr != BREAKPOINT_INSTRUCTION) {
26362+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
26363 /*
26364 * The breakpoint instruction was removed right
26365 * after we hit it. Another cpu has removed
26366@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
26367 " movq %rax, 152(%rsp)\n"
26368 RESTORE_REGS_STRING
26369 " popfq\n"
26370+#ifdef KERNEXEC_PLUGIN
26371+ " btsq $63,(%rsp)\n"
26372+#endif
26373 #else
26374 " pushf\n"
26375 SAVE_REGS_STRING
26376@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
26377 struct kprobe_ctlblk *kcb)
26378 {
26379 unsigned long *tos = stack_addr(regs);
26380- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
26381+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
26382 unsigned long orig_ip = (unsigned long)p->addr;
26383 kprobe_opcode_t *insn = p->ainsn.insn;
26384
26385@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
26386 struct die_args *args = data;
26387 int ret = NOTIFY_DONE;
26388
26389- if (args->regs && user_mode_vm(args->regs))
26390+ if (args->regs && user_mode(args->regs))
26391 return ret;
26392
26393 if (val == DIE_GPF) {
26394diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
26395index f1314d0..15f3154 100644
26396--- a/arch/x86/kernel/kprobes/opt.c
26397+++ b/arch/x86/kernel/kprobes/opt.c
26398@@ -79,6 +79,7 @@ found:
26399 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
26400 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26401 {
26402+ pax_open_kernel();
26403 #ifdef CONFIG_X86_64
26404 *addr++ = 0x48;
26405 *addr++ = 0xbf;
26406@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26407 *addr++ = 0xb8;
26408 #endif
26409 *(unsigned long *)addr = val;
26410+ pax_close_kernel();
26411 }
26412
26413 asm (
26414@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26415 * Verify if the address gap is in 2GB range, because this uses
26416 * a relative jump.
26417 */
26418- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
26419+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
26420 if (abs(rel) > 0x7fffffff) {
26421 __arch_remove_optimized_kprobe(op, 0);
26422 return -ERANGE;
26423@@ -354,16 +356,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26424 op->optinsn.size = ret;
26425
26426 /* Copy arch-dep-instance from template */
26427- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
26428+ pax_open_kernel();
26429+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
26430+ pax_close_kernel();
26431
26432 /* Set probe information */
26433 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
26434
26435 /* Set probe function call */
26436- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
26437+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
26438
26439 /* Set returning jmp instruction at the tail of out-of-line buffer */
26440- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
26441+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
26442 (u8 *)op->kp.addr + op->optinsn.size);
26443
26444 flush_icache_range((unsigned long) buf,
26445@@ -388,7 +392,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
26446 WARN_ON(kprobe_disabled(&op->kp));
26447
26448 /* Backup instructions which will be replaced by jump address */
26449- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
26450+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
26451 RELATIVE_ADDR_SIZE);
26452
26453 insn_buf[0] = RELATIVEJUMP_OPCODE;
26454@@ -436,7 +440,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
26455 /* This kprobe is really able to run optimized path. */
26456 op = container_of(p, struct optimized_kprobe, kp);
26457 /* Detour through copied instructions */
26458- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
26459+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
26460 if (!reenter)
26461 reset_current_kprobe();
26462 preempt_enable_no_resched();
26463diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
26464index c2bedae..25e7ab60 100644
26465--- a/arch/x86/kernel/ksysfs.c
26466+++ b/arch/x86/kernel/ksysfs.c
26467@@ -184,7 +184,7 @@ out:
26468
26469 static struct kobj_attribute type_attr = __ATTR_RO(type);
26470
26471-static struct bin_attribute data_attr = {
26472+static bin_attribute_no_const data_attr __read_only = {
26473 .attr = {
26474 .name = "data",
26475 .mode = S_IRUGO,
26476diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
26477index c37886d..d851d32 100644
26478--- a/arch/x86/kernel/ldt.c
26479+++ b/arch/x86/kernel/ldt.c
26480@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
26481 if (reload) {
26482 #ifdef CONFIG_SMP
26483 preempt_disable();
26484- load_LDT(pc);
26485+ load_LDT_nolock(pc);
26486 if (!cpumask_equal(mm_cpumask(current->mm),
26487 cpumask_of(smp_processor_id())))
26488 smp_call_function(flush_ldt, current->mm, 1);
26489 preempt_enable();
26490 #else
26491- load_LDT(pc);
26492+ load_LDT_nolock(pc);
26493 #endif
26494 }
26495 if (oldsize) {
26496@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26497 return err;
26498
26499 for (i = 0; i < old->size; i++)
26500- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26501+ write_ldt_entry(new->ldt, i, old->ldt + i);
26502 return 0;
26503 }
26504
26505@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26506 retval = copy_ldt(&mm->context, &old_mm->context);
26507 mutex_unlock(&old_mm->context.lock);
26508 }
26509+
26510+ if (tsk == current) {
26511+ mm->context.vdso = 0;
26512+
26513+#ifdef CONFIG_X86_32
26514+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26515+ mm->context.user_cs_base = 0UL;
26516+ mm->context.user_cs_limit = ~0UL;
26517+
26518+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26519+ cpus_clear(mm->context.cpu_user_cs_mask);
26520+#endif
26521+
26522+#endif
26523+#endif
26524+
26525+ }
26526+
26527 return retval;
26528 }
26529
26530@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26531 }
26532 }
26533
26534+#ifdef CONFIG_PAX_SEGMEXEC
26535+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26536+ error = -EINVAL;
26537+ goto out_unlock;
26538+ }
26539+#endif
26540+
26541 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26542 error = -EINVAL;
26543 goto out_unlock;
26544diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26545index 1667b1d..16492c5 100644
26546--- a/arch/x86/kernel/machine_kexec_32.c
26547+++ b/arch/x86/kernel/machine_kexec_32.c
26548@@ -25,7 +25,7 @@
26549 #include <asm/cacheflush.h>
26550 #include <asm/debugreg.h>
26551
26552-static void set_idt(void *newidt, __u16 limit)
26553+static void set_idt(struct desc_struct *newidt, __u16 limit)
26554 {
26555 struct desc_ptr curidt;
26556
26557@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26558 }
26559
26560
26561-static void set_gdt(void *newgdt, __u16 limit)
26562+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26563 {
26564 struct desc_ptr curgdt;
26565
26566@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26567 }
26568
26569 control_page = page_address(image->control_code_page);
26570- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26571+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26572
26573 relocate_kernel_ptr = control_page;
26574 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26575diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26576index c73aecf..4c63630 100644
26577--- a/arch/x86/kernel/mcount_64.S
26578+++ b/arch/x86/kernel/mcount_64.S
26579@@ -7,7 +7,7 @@
26580 #include <linux/linkage.h>
26581 #include <asm/ptrace.h>
26582 #include <asm/ftrace.h>
26583-
26584+#include <asm/alternative-asm.h>
26585
26586 .code64
26587 .section .entry.text, "ax"
26588@@ -24,8 +24,9 @@
26589 #ifdef CONFIG_DYNAMIC_FTRACE
26590
26591 ENTRY(function_hook)
26592+ pax_force_retaddr
26593 retq
26594-END(function_hook)
26595+ENDPROC(function_hook)
26596
26597 /* skip is set if stack has been adjusted */
26598 .macro ftrace_caller_setup skip=0
26599@@ -62,8 +63,9 @@ GLOBAL(ftrace_graph_call)
26600 #endif
26601
26602 GLOBAL(ftrace_stub)
26603+ pax_force_retaddr
26604 retq
26605-END(ftrace_caller)
26606+ENDPROC(ftrace_caller)
26607
26608 ENTRY(ftrace_regs_caller)
26609 /* Save the current flags before compare (in SS location)*/
26610@@ -127,7 +129,7 @@ GLOBAL(ftrace_regs_call)
26611 popfq
26612 jmp ftrace_stub
26613
26614-END(ftrace_regs_caller)
26615+ENDPROC(ftrace_regs_caller)
26616
26617
26618 #else /* ! CONFIG_DYNAMIC_FTRACE */
26619@@ -145,6 +147,7 @@ ENTRY(function_hook)
26620 #endif
26621
26622 GLOBAL(ftrace_stub)
26623+ pax_force_retaddr
26624 retq
26625
26626 trace:
26627@@ -158,12 +161,13 @@ trace:
26628 #endif
26629 subq $MCOUNT_INSN_SIZE, %rdi
26630
26631+ pax_force_fptr ftrace_trace_function
26632 call *ftrace_trace_function
26633
26634 MCOUNT_RESTORE_FRAME
26635
26636 jmp ftrace_stub
26637-END(function_hook)
26638+ENDPROC(function_hook)
26639 #endif /* CONFIG_DYNAMIC_FTRACE */
26640 #endif /* CONFIG_FUNCTION_TRACER */
26641
26642@@ -185,8 +189,9 @@ ENTRY(ftrace_graph_caller)
26643
26644 MCOUNT_RESTORE_FRAME
26645
26646+ pax_force_retaddr
26647 retq
26648-END(ftrace_graph_caller)
26649+ENDPROC(ftrace_graph_caller)
26650
26651 GLOBAL(return_to_handler)
26652 subq $24, %rsp
26653@@ -202,5 +207,7 @@ GLOBAL(return_to_handler)
26654 movq 8(%rsp), %rdx
26655 movq (%rsp), %rax
26656 addq $24, %rsp
26657+ pax_force_fptr %rdi
26658 jmp *%rdi
26659+ENDPROC(return_to_handler)
26660 #endif
26661diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26662index e69f988..da078ea 100644
26663--- a/arch/x86/kernel/module.c
26664+++ b/arch/x86/kernel/module.c
26665@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26666 }
26667 #endif
26668
26669-void *module_alloc(unsigned long size)
26670+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26671 {
26672- if (PAGE_ALIGN(size) > MODULES_LEN)
26673+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26674 return NULL;
26675 return __vmalloc_node_range(size, 1,
26676 MODULES_VADDR + get_module_load_offset(),
26677- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26678- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26679+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26680+ prot, NUMA_NO_NODE,
26681 __builtin_return_address(0));
26682 }
26683
26684+void *module_alloc(unsigned long size)
26685+{
26686+
26687+#ifdef CONFIG_PAX_KERNEXEC
26688+ return __module_alloc(size, PAGE_KERNEL);
26689+#else
26690+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26691+#endif
26692+
26693+}
26694+
26695+#ifdef CONFIG_PAX_KERNEXEC
26696+#ifdef CONFIG_X86_32
26697+void *module_alloc_exec(unsigned long size)
26698+{
26699+ struct vm_struct *area;
26700+
26701+ if (size == 0)
26702+ return NULL;
26703+
26704+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26705+return area ? area->addr : NULL;
26706+}
26707+EXPORT_SYMBOL(module_alloc_exec);
26708+
26709+void module_free_exec(struct module *mod, void *module_region)
26710+{
26711+ vunmap(module_region);
26712+}
26713+EXPORT_SYMBOL(module_free_exec);
26714+#else
26715+void module_free_exec(struct module *mod, void *module_region)
26716+{
26717+ module_free(mod, module_region);
26718+}
26719+EXPORT_SYMBOL(module_free_exec);
26720+
26721+void *module_alloc_exec(unsigned long size)
26722+{
26723+ return __module_alloc(size, PAGE_KERNEL_RX);
26724+}
26725+EXPORT_SYMBOL(module_alloc_exec);
26726+#endif
26727+#endif
26728+
26729 #ifdef CONFIG_X86_32
26730 int apply_relocate(Elf32_Shdr *sechdrs,
26731 const char *strtab,
26732@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26733 unsigned int i;
26734 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26735 Elf32_Sym *sym;
26736- uint32_t *location;
26737+ uint32_t *plocation, location;
26738
26739 DEBUGP("Applying relocate section %u to %u\n",
26740 relsec, sechdrs[relsec].sh_info);
26741 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26742 /* This is where to make the change */
26743- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26744- + rel[i].r_offset;
26745+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26746+ location = (uint32_t)plocation;
26747+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26748+ plocation = ktla_ktva((void *)plocation);
26749 /* This is the symbol it is referring to. Note that all
26750 undefined symbols have been resolved. */
26751 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26752@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26753 switch (ELF32_R_TYPE(rel[i].r_info)) {
26754 case R_386_32:
26755 /* We add the value into the location given */
26756- *location += sym->st_value;
26757+ pax_open_kernel();
26758+ *plocation += sym->st_value;
26759+ pax_close_kernel();
26760 break;
26761 case R_386_PC32:
26762 /* Add the value, subtract its position */
26763- *location += sym->st_value - (uint32_t)location;
26764+ pax_open_kernel();
26765+ *plocation += sym->st_value - location;
26766+ pax_close_kernel();
26767 break;
26768 default:
26769 pr_err("%s: Unknown relocation: %u\n",
26770@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26771 case R_X86_64_NONE:
26772 break;
26773 case R_X86_64_64:
26774+ pax_open_kernel();
26775 *(u64 *)loc = val;
26776+ pax_close_kernel();
26777 break;
26778 case R_X86_64_32:
26779+ pax_open_kernel();
26780 *(u32 *)loc = val;
26781+ pax_close_kernel();
26782 if (val != *(u32 *)loc)
26783 goto overflow;
26784 break;
26785 case R_X86_64_32S:
26786+ pax_open_kernel();
26787 *(s32 *)loc = val;
26788+ pax_close_kernel();
26789 if ((s64)val != *(s32 *)loc)
26790 goto overflow;
26791 break;
26792 case R_X86_64_PC32:
26793 val -= (u64)loc;
26794+ pax_open_kernel();
26795 *(u32 *)loc = val;
26796+ pax_close_kernel();
26797+
26798 #if 0
26799 if ((s64)val != *(s32 *)loc)
26800 goto overflow;
26801diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26802index c9603ac..9f88728 100644
26803--- a/arch/x86/kernel/msr.c
26804+++ b/arch/x86/kernel/msr.c
26805@@ -37,6 +37,7 @@
26806 #include <linux/notifier.h>
26807 #include <linux/uaccess.h>
26808 #include <linux/gfp.h>
26809+#include <linux/grsecurity.h>
26810
26811 #include <asm/processor.h>
26812 #include <asm/msr.h>
26813@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26814 int err = 0;
26815 ssize_t bytes = 0;
26816
26817+#ifdef CONFIG_GRKERNSEC_KMEM
26818+ gr_handle_msr_write();
26819+ return -EPERM;
26820+#endif
26821+
26822 if (count % 8)
26823 return -EINVAL; /* Invalid chunk size */
26824
26825@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26826 err = -EBADF;
26827 break;
26828 }
26829+#ifdef CONFIG_GRKERNSEC_KMEM
26830+ gr_handle_msr_write();
26831+ return -EPERM;
26832+#endif
26833 if (copy_from_user(&regs, uregs, sizeof regs)) {
26834 err = -EFAULT;
26835 break;
26836@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26837 return notifier_from_errno(err);
26838 }
26839
26840-static struct notifier_block __refdata msr_class_cpu_notifier = {
26841+static struct notifier_block msr_class_cpu_notifier = {
26842 .notifier_call = msr_class_cpu_callback,
26843 };
26844
26845diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26846index c3e985d..110a36a 100644
26847--- a/arch/x86/kernel/nmi.c
26848+++ b/arch/x86/kernel/nmi.c
26849@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26850
26851 static void nmi_max_handler(struct irq_work *w)
26852 {
26853- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26854+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26855 int remainder_ns, decimal_msecs;
26856- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26857+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26858
26859 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26860 decimal_msecs = remainder_ns / 1000;
26861
26862 printk_ratelimited(KERN_INFO
26863 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26864- a->handler, whole_msecs, decimal_msecs);
26865+ n->action->handler, whole_msecs, decimal_msecs);
26866 }
26867
26868 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26869@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26870 delta = sched_clock() - delta;
26871 trace_nmi_handler(a->handler, (int)delta, thishandled);
26872
26873- if (delta < nmi_longest_ns || delta < a->max_duration)
26874+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26875 continue;
26876
26877- a->max_duration = delta;
26878- irq_work_queue(&a->irq_work);
26879+ a->work->max_duration = delta;
26880+ irq_work_queue(&a->work->irq_work);
26881 }
26882
26883 rcu_read_unlock();
26884@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26885 }
26886 NOKPROBE_SYMBOL(nmi_handle);
26887
26888-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26889+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26890 {
26891 struct nmi_desc *desc = nmi_to_desc(type);
26892 unsigned long flags;
26893@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26894 if (!action->handler)
26895 return -EINVAL;
26896
26897- init_irq_work(&action->irq_work, nmi_max_handler);
26898+ action->work->action = action;
26899+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26900
26901 spin_lock_irqsave(&desc->lock, flags);
26902
26903@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26904 * event confuses some handlers (kdump uses this flag)
26905 */
26906 if (action->flags & NMI_FLAG_FIRST)
26907- list_add_rcu(&action->list, &desc->head);
26908+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26909 else
26910- list_add_tail_rcu(&action->list, &desc->head);
26911+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26912
26913 spin_unlock_irqrestore(&desc->lock, flags);
26914 return 0;
26915@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26916 if (!strcmp(n->name, name)) {
26917 WARN(in_nmi(),
26918 "Trying to free NMI (%s) from NMI context!\n", n->name);
26919- list_del_rcu(&n->list);
26920+ pax_list_del_rcu((struct list_head *)&n->list);
26921 break;
26922 }
26923 }
26924@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26925 dotraplinkage notrace void
26926 do_nmi(struct pt_regs *regs, long error_code)
26927 {
26928+
26929+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26930+ if (!user_mode(regs)) {
26931+ unsigned long cs = regs->cs & 0xFFFF;
26932+ unsigned long ip = ktva_ktla(regs->ip);
26933+
26934+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26935+ regs->ip = ip;
26936+ }
26937+#endif
26938+
26939 nmi_nesting_preprocess(regs);
26940
26941 nmi_enter();
26942diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26943index 6d9582e..f746287 100644
26944--- a/arch/x86/kernel/nmi_selftest.c
26945+++ b/arch/x86/kernel/nmi_selftest.c
26946@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26947 {
26948 /* trap all the unknown NMIs we may generate */
26949 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26950- __initdata);
26951+ __initconst);
26952 }
26953
26954 static void __init cleanup_nmi_testsuite(void)
26955@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26956 unsigned long timeout;
26957
26958 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26959- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26960+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26961 nmi_fail = FAILURE;
26962 return;
26963 }
26964diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26965index bbb6c73..24a58ef 100644
26966--- a/arch/x86/kernel/paravirt-spinlocks.c
26967+++ b/arch/x86/kernel/paravirt-spinlocks.c
26968@@ -8,7 +8,7 @@
26969
26970 #include <asm/paravirt.h>
26971
26972-struct pv_lock_ops pv_lock_ops = {
26973+struct pv_lock_ops pv_lock_ops __read_only = {
26974 #ifdef CONFIG_SMP
26975 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26976 .unlock_kick = paravirt_nop,
26977diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26978index 548d25f..f8fb99c 100644
26979--- a/arch/x86/kernel/paravirt.c
26980+++ b/arch/x86/kernel/paravirt.c
26981@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26982 {
26983 return x;
26984 }
26985+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26986+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26987+#endif
26988
26989 void __init default_banner(void)
26990 {
26991@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26992
26993 if (opfunc == NULL)
26994 /* If there's no function, patch it with a ud2a (BUG) */
26995- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26996- else if (opfunc == _paravirt_nop)
26997+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26998+ else if (opfunc == (void *)_paravirt_nop)
26999 /* If the operation is a nop, then nop the callsite */
27000 ret = paravirt_patch_nop();
27001
27002 /* identity functions just return their single argument */
27003- else if (opfunc == _paravirt_ident_32)
27004+ else if (opfunc == (void *)_paravirt_ident_32)
27005 ret = paravirt_patch_ident_32(insnbuf, len);
27006- else if (opfunc == _paravirt_ident_64)
27007+ else if (opfunc == (void *)_paravirt_ident_64)
27008 ret = paravirt_patch_ident_64(insnbuf, len);
27009+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27010+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
27011+ ret = paravirt_patch_ident_64(insnbuf, len);
27012+#endif
27013
27014 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
27015 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
27016@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
27017 if (insn_len > len || start == NULL)
27018 insn_len = len;
27019 else
27020- memcpy(insnbuf, start, insn_len);
27021+ memcpy(insnbuf, ktla_ktva(start), insn_len);
27022
27023 return insn_len;
27024 }
27025@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
27026 return this_cpu_read(paravirt_lazy_mode);
27027 }
27028
27029-struct pv_info pv_info = {
27030+struct pv_info pv_info __read_only = {
27031 .name = "bare hardware",
27032 .paravirt_enabled = 0,
27033 .kernel_rpl = 0,
27034@@ -311,16 +318,16 @@ struct pv_info pv_info = {
27035 #endif
27036 };
27037
27038-struct pv_init_ops pv_init_ops = {
27039+struct pv_init_ops pv_init_ops __read_only = {
27040 .patch = native_patch,
27041 };
27042
27043-struct pv_time_ops pv_time_ops = {
27044+struct pv_time_ops pv_time_ops __read_only = {
27045 .sched_clock = native_sched_clock,
27046 .steal_clock = native_steal_clock,
27047 };
27048
27049-__visible struct pv_irq_ops pv_irq_ops = {
27050+__visible struct pv_irq_ops pv_irq_ops __read_only = {
27051 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
27052 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
27053 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
27054@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
27055 #endif
27056 };
27057
27058-__visible struct pv_cpu_ops pv_cpu_ops = {
27059+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
27060 .cpuid = native_cpuid,
27061 .get_debugreg = native_get_debugreg,
27062 .set_debugreg = native_set_debugreg,
27063@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
27064 NOKPROBE_SYMBOL(native_set_debugreg);
27065 NOKPROBE_SYMBOL(native_load_idt);
27066
27067-struct pv_apic_ops pv_apic_ops = {
27068+struct pv_apic_ops pv_apic_ops __read_only= {
27069 #ifdef CONFIG_X86_LOCAL_APIC
27070 .startup_ipi_hook = paravirt_nop,
27071 #endif
27072 };
27073
27074-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
27075+#ifdef CONFIG_X86_32
27076+#ifdef CONFIG_X86_PAE
27077+/* 64-bit pagetable entries */
27078+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
27079+#else
27080 /* 32-bit pagetable entries */
27081 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
27082+#endif
27083 #else
27084 /* 64-bit pagetable entries */
27085 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
27086 #endif
27087
27088-struct pv_mmu_ops pv_mmu_ops = {
27089+struct pv_mmu_ops pv_mmu_ops __read_only = {
27090
27091 .read_cr2 = native_read_cr2,
27092 .write_cr2 = native_write_cr2,
27093@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
27094 .make_pud = PTE_IDENT,
27095
27096 .set_pgd = native_set_pgd,
27097+ .set_pgd_batched = native_set_pgd_batched,
27098 #endif
27099 #endif /* PAGETABLE_LEVELS >= 3 */
27100
27101@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
27102 },
27103
27104 .set_fixmap = native_set_fixmap,
27105+
27106+#ifdef CONFIG_PAX_KERNEXEC
27107+ .pax_open_kernel = native_pax_open_kernel,
27108+ .pax_close_kernel = native_pax_close_kernel,
27109+#endif
27110+
27111 };
27112
27113 EXPORT_SYMBOL_GPL(pv_time_ops);
27114diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
27115index 0497f71..7186c0d 100644
27116--- a/arch/x86/kernel/pci-calgary_64.c
27117+++ b/arch/x86/kernel/pci-calgary_64.c
27118@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
27119 tce_space = be64_to_cpu(readq(target));
27120 tce_space = tce_space & TAR_SW_BITS;
27121
27122- tce_space = tce_space & (~specified_table_size);
27123+ tce_space = tce_space & (~(unsigned long)specified_table_size);
27124 info->tce_space = (u64 *)__va(tce_space);
27125 }
27126 }
27127diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
27128index 35ccf75..7a15747 100644
27129--- a/arch/x86/kernel/pci-iommu_table.c
27130+++ b/arch/x86/kernel/pci-iommu_table.c
27131@@ -2,7 +2,7 @@
27132 #include <asm/iommu_table.h>
27133 #include <linux/string.h>
27134 #include <linux/kallsyms.h>
27135-
27136+#include <linux/sched.h>
27137
27138 #define DEBUG 1
27139
27140diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
27141index 77dd0ad..9ec4723 100644
27142--- a/arch/x86/kernel/pci-swiotlb.c
27143+++ b/arch/x86/kernel/pci-swiotlb.c
27144@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
27145 struct dma_attrs *attrs)
27146 {
27147 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
27148- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
27149+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
27150 else
27151 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
27152 }
27153diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
27154index ca7f0d5..8996469 100644
27155--- a/arch/x86/kernel/preempt.S
27156+++ b/arch/x86/kernel/preempt.S
27157@@ -3,12 +3,14 @@
27158 #include <asm/dwarf2.h>
27159 #include <asm/asm.h>
27160 #include <asm/calling.h>
27161+#include <asm/alternative-asm.h>
27162
27163 ENTRY(___preempt_schedule)
27164 CFI_STARTPROC
27165 SAVE_ALL
27166 call preempt_schedule
27167 RESTORE_ALL
27168+ pax_force_retaddr
27169 ret
27170 CFI_ENDPROC
27171
27172@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
27173 SAVE_ALL
27174 call preempt_schedule_context
27175 RESTORE_ALL
27176+ pax_force_retaddr
27177 ret
27178 CFI_ENDPROC
27179
27180diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
27181index f804dc9..7c62095 100644
27182--- a/arch/x86/kernel/process.c
27183+++ b/arch/x86/kernel/process.c
27184@@ -36,7 +36,8 @@
27185 * section. Since TSS's are completely CPU-local, we want them
27186 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
27187 */
27188-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
27189+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
27190+EXPORT_SYMBOL(init_tss);
27191
27192 #ifdef CONFIG_X86_64
27193 static DEFINE_PER_CPU(unsigned char, is_idle);
27194@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
27195 task_xstate_cachep =
27196 kmem_cache_create("task_xstate", xstate_size,
27197 __alignof__(union thread_xstate),
27198- SLAB_PANIC | SLAB_NOTRACK, NULL);
27199+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
27200 setup_xstate_comp();
27201 }
27202
27203@@ -106,7 +107,7 @@ void exit_thread(void)
27204 unsigned long *bp = t->io_bitmap_ptr;
27205
27206 if (bp) {
27207- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
27208+ struct tss_struct *tss = init_tss + get_cpu();
27209
27210 t->io_bitmap_ptr = NULL;
27211 clear_thread_flag(TIF_IO_BITMAP);
27212@@ -126,6 +127,9 @@ void flush_thread(void)
27213 {
27214 struct task_struct *tsk = current;
27215
27216+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
27217+ loadsegment(gs, 0);
27218+#endif
27219 flush_ptrace_hw_breakpoint(tsk);
27220 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
27221 drop_init_fpu(tsk);
27222@@ -272,7 +276,7 @@ static void __exit_idle(void)
27223 void exit_idle(void)
27224 {
27225 /* idle loop has pid 0 */
27226- if (current->pid)
27227+ if (task_pid_nr(current))
27228 return;
27229 __exit_idle();
27230 }
27231@@ -325,7 +329,7 @@ bool xen_set_default_idle(void)
27232 return ret;
27233 }
27234 #endif
27235-void stop_this_cpu(void *dummy)
27236+__noreturn void stop_this_cpu(void *dummy)
27237 {
27238 local_irq_disable();
27239 /*
27240@@ -454,16 +458,37 @@ static int __init idle_setup(char *str)
27241 }
27242 early_param("idle", idle_setup);
27243
27244-unsigned long arch_align_stack(unsigned long sp)
27245+#ifdef CONFIG_PAX_RANDKSTACK
27246+void pax_randomize_kstack(struct pt_regs *regs)
27247 {
27248- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
27249- sp -= get_random_int() % 8192;
27250- return sp & ~0xf;
27251-}
27252+ struct thread_struct *thread = &current->thread;
27253+ unsigned long time;
27254
27255-unsigned long arch_randomize_brk(struct mm_struct *mm)
27256-{
27257- unsigned long range_end = mm->brk + 0x02000000;
27258- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
27259-}
27260+ if (!randomize_va_space)
27261+ return;
27262+
27263+ if (v8086_mode(regs))
27264+ return;
27265
27266+ rdtscl(time);
27267+
27268+ /* P4 seems to return a 0 LSB, ignore it */
27269+#ifdef CONFIG_MPENTIUM4
27270+ time &= 0x3EUL;
27271+ time <<= 2;
27272+#elif defined(CONFIG_X86_64)
27273+ time &= 0xFUL;
27274+ time <<= 4;
27275+#else
27276+ time &= 0x1FUL;
27277+ time <<= 3;
27278+#endif
27279+
27280+ thread->sp0 ^= time;
27281+ load_sp0(init_tss + smp_processor_id(), thread);
27282+
27283+#ifdef CONFIG_X86_64
27284+ this_cpu_write(kernel_stack, thread->sp0);
27285+#endif
27286+}
27287+#endif
27288diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
27289index 7bc86bb..0ea06e8 100644
27290--- a/arch/x86/kernel/process_32.c
27291+++ b/arch/x86/kernel/process_32.c
27292@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
27293 unsigned long thread_saved_pc(struct task_struct *tsk)
27294 {
27295 return ((unsigned long *)tsk->thread.sp)[3];
27296+//XXX return tsk->thread.eip;
27297 }
27298
27299 void __show_regs(struct pt_regs *regs, int all)
27300@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
27301 unsigned long sp;
27302 unsigned short ss, gs;
27303
27304- if (user_mode_vm(regs)) {
27305+ if (user_mode(regs)) {
27306 sp = regs->sp;
27307 ss = regs->ss & 0xffff;
27308- gs = get_user_gs(regs);
27309 } else {
27310 sp = kernel_stack_pointer(regs);
27311 savesegment(ss, ss);
27312- savesegment(gs, gs);
27313 }
27314+ gs = get_user_gs(regs);
27315
27316 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
27317 (u16)regs->cs, regs->ip, regs->flags,
27318- smp_processor_id());
27319+ raw_smp_processor_id());
27320 print_symbol("EIP is at %s\n", regs->ip);
27321
27322 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
27323@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
27324 int copy_thread(unsigned long clone_flags, unsigned long sp,
27325 unsigned long arg, struct task_struct *p)
27326 {
27327- struct pt_regs *childregs = task_pt_regs(p);
27328+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
27329 struct task_struct *tsk;
27330 int err;
27331
27332 p->thread.sp = (unsigned long) childregs;
27333 p->thread.sp0 = (unsigned long) (childregs+1);
27334+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27335
27336 if (unlikely(p->flags & PF_KTHREAD)) {
27337 /* kernel thread */
27338 memset(childregs, 0, sizeof(struct pt_regs));
27339 p->thread.ip = (unsigned long) ret_from_kernel_thread;
27340- task_user_gs(p) = __KERNEL_STACK_CANARY;
27341- childregs->ds = __USER_DS;
27342- childregs->es = __USER_DS;
27343+ savesegment(gs, childregs->gs);
27344+ childregs->ds = __KERNEL_DS;
27345+ childregs->es = __KERNEL_DS;
27346 childregs->fs = __KERNEL_PERCPU;
27347 childregs->bx = sp; /* function */
27348 childregs->bp = arg;
27349@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27350 struct thread_struct *prev = &prev_p->thread,
27351 *next = &next_p->thread;
27352 int cpu = smp_processor_id();
27353- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27354+ struct tss_struct *tss = init_tss + cpu;
27355 fpu_switch_t fpu;
27356
27357 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
27358@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27359 */
27360 lazy_save_gs(prev->gs);
27361
27362+#ifdef CONFIG_PAX_MEMORY_UDEREF
27363+ __set_fs(task_thread_info(next_p)->addr_limit);
27364+#endif
27365+
27366 /*
27367 * Load the per-thread Thread-Local Storage descriptor.
27368 */
27369@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27370 */
27371 arch_end_context_switch(next_p);
27372
27373- this_cpu_write(kernel_stack,
27374- (unsigned long)task_stack_page(next_p) +
27375- THREAD_SIZE - KERNEL_STACK_OFFSET);
27376+ this_cpu_write(current_task, next_p);
27377+ this_cpu_write(current_tinfo, &next_p->tinfo);
27378+ this_cpu_write(kernel_stack, next->sp0);
27379
27380 /*
27381 * Restore %gs if needed (which is common)
27382@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27383
27384 switch_fpu_finish(next_p, fpu);
27385
27386- this_cpu_write(current_task, next_p);
27387-
27388 return prev_p;
27389 }
27390
27391@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
27392 } while (count++ < 16);
27393 return 0;
27394 }
27395-
27396diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
27397index ca5b02d..c0b2f6a 100644
27398--- a/arch/x86/kernel/process_64.c
27399+++ b/arch/x86/kernel/process_64.c
27400@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27401 struct pt_regs *childregs;
27402 struct task_struct *me = current;
27403
27404- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
27405+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
27406 childregs = task_pt_regs(p);
27407 p->thread.sp = (unsigned long) childregs;
27408 p->thread.usersp = me->thread.usersp;
27409+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27410 set_tsk_thread_flag(p, TIF_FORK);
27411 p->thread.fpu_counter = 0;
27412 p->thread.io_bitmap_ptr = NULL;
27413@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27414 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
27415 savesegment(es, p->thread.es);
27416 savesegment(ds, p->thread.ds);
27417+ savesegment(ss, p->thread.ss);
27418+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
27419 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27420
27421 if (unlikely(p->flags & PF_KTHREAD)) {
27422@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27423 struct thread_struct *prev = &prev_p->thread;
27424 struct thread_struct *next = &next_p->thread;
27425 int cpu = smp_processor_id();
27426- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27427+ struct tss_struct *tss = init_tss + cpu;
27428 unsigned fsindex, gsindex;
27429 fpu_switch_t fpu;
27430
27431@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27432 if (unlikely(next->ds | prev->ds))
27433 loadsegment(ds, next->ds);
27434
27435+ savesegment(ss, prev->ss);
27436+ if (unlikely(next->ss != prev->ss))
27437+ loadsegment(ss, next->ss);
27438
27439 /* We must save %fs and %gs before load_TLS() because
27440 * %fs and %gs may be cleared by load_TLS().
27441@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27442 prev->usersp = this_cpu_read(old_rsp);
27443 this_cpu_write(old_rsp, next->usersp);
27444 this_cpu_write(current_task, next_p);
27445+ this_cpu_write(current_tinfo, &next_p->tinfo);
27446
27447 /*
27448 * If it were not for PREEMPT_ACTIVE we could guarantee that the
27449@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27450 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
27451 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
27452
27453- this_cpu_write(kernel_stack,
27454- (unsigned long)task_stack_page(next_p) +
27455- THREAD_SIZE - KERNEL_STACK_OFFSET);
27456+ this_cpu_write(kernel_stack, next->sp0);
27457
27458 /*
27459 * Now maybe reload the debug registers and handle I/O bitmaps
27460@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
27461 if (!p || p == current || p->state == TASK_RUNNING)
27462 return 0;
27463 stack = (unsigned long)task_stack_page(p);
27464- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
27465+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
27466 return 0;
27467 fp = *(u64 *)(p->thread.sp);
27468 do {
27469- if (fp < (unsigned long)stack ||
27470- fp >= (unsigned long)stack+THREAD_SIZE)
27471+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
27472 return 0;
27473 ip = *(u64 *)(fp+8);
27474 if (!in_sched_functions(ip))
27475diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
27476index 678c0ad..2fc2a7b 100644
27477--- a/arch/x86/kernel/ptrace.c
27478+++ b/arch/x86/kernel/ptrace.c
27479@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
27480 unsigned long sp = (unsigned long)&regs->sp;
27481 u32 *prev_esp;
27482
27483- if (context == (sp & ~(THREAD_SIZE - 1)))
27484+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
27485 return sp;
27486
27487- prev_esp = (u32 *)(context);
27488+ prev_esp = *(u32 **)(context);
27489 if (prev_esp)
27490 return (unsigned long)prev_esp;
27491
27492@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
27493 if (child->thread.gs != value)
27494 return do_arch_prctl(child, ARCH_SET_GS, value);
27495 return 0;
27496+
27497+ case offsetof(struct user_regs_struct,ip):
27498+ /*
27499+ * Protect against any attempt to set ip to an
27500+ * impossible address. There are dragons lurking if the
27501+ * address is noncanonical. (This explicitly allows
27502+ * setting ip to TASK_SIZE_MAX, because user code can do
27503+ * that all by itself by running off the end of its
27504+ * address space.
27505+ */
27506+ if (value > TASK_SIZE_MAX)
27507+ return -EIO;
27508+ break;
27509+
27510 #endif
27511 }
27512
27513@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
27514 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27515 {
27516 int i;
27517- int dr7 = 0;
27518+ unsigned long dr7 = 0;
27519 struct arch_hw_breakpoint *info;
27520
27521 for (i = 0; i < HBP_NUM; i++) {
27522@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
27523 unsigned long addr, unsigned long data)
27524 {
27525 int ret;
27526- unsigned long __user *datap = (unsigned long __user *)data;
27527+ unsigned long __user *datap = (__force unsigned long __user *)data;
27528
27529 switch (request) {
27530 /* read the word at location addr in the USER area. */
27531@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27532 if ((int) addr < 0)
27533 return -EIO;
27534 ret = do_get_thread_area(child, addr,
27535- (struct user_desc __user *)data);
27536+ (__force struct user_desc __user *) data);
27537 break;
27538
27539 case PTRACE_SET_THREAD_AREA:
27540 if ((int) addr < 0)
27541 return -EIO;
27542 ret = do_set_thread_area(child, addr,
27543- (struct user_desc __user *)data, 0);
27544+ (__force struct user_desc __user *) data, 0);
27545 break;
27546 #endif
27547
27548@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27549
27550 #ifdef CONFIG_X86_64
27551
27552-static struct user_regset x86_64_regsets[] __read_mostly = {
27553+static user_regset_no_const x86_64_regsets[] __read_only = {
27554 [REGSET_GENERAL] = {
27555 .core_note_type = NT_PRSTATUS,
27556 .n = sizeof(struct user_regs_struct) / sizeof(long),
27557@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27558 #endif /* CONFIG_X86_64 */
27559
27560 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27561-static struct user_regset x86_32_regsets[] __read_mostly = {
27562+static user_regset_no_const x86_32_regsets[] __read_only = {
27563 [REGSET_GENERAL] = {
27564 .core_note_type = NT_PRSTATUS,
27565 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27566@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27567 */
27568 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27569
27570-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27571+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27572 {
27573 #ifdef CONFIG_X86_64
27574 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27575@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27576 memset(info, 0, sizeof(*info));
27577 info->si_signo = SIGTRAP;
27578 info->si_code = si_code;
27579- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27580+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27581 }
27582
27583 void user_single_step_siginfo(struct task_struct *tsk,
27584@@ -1450,6 +1464,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27585 # define IS_IA32 0
27586 #endif
27587
27588+#ifdef CONFIG_GRKERNSEC_SETXID
27589+extern void gr_delayed_cred_worker(void);
27590+#endif
27591+
27592 /*
27593 * We must return the syscall number to actually look up in the table.
27594 * This can be -1L to skip running any syscall at all.
27595@@ -1460,6 +1478,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27596
27597 user_exit();
27598
27599+#ifdef CONFIG_GRKERNSEC_SETXID
27600+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27601+ gr_delayed_cred_worker();
27602+#endif
27603+
27604 /*
27605 * If we stepped into a sysenter/syscall insn, it trapped in
27606 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27607@@ -1515,6 +1538,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27608 */
27609 user_exit();
27610
27611+#ifdef CONFIG_GRKERNSEC_SETXID
27612+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27613+ gr_delayed_cred_worker();
27614+#endif
27615+
27616 audit_syscall_exit(regs);
27617
27618 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27619diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27620index 2f355d2..e75ed0a 100644
27621--- a/arch/x86/kernel/pvclock.c
27622+++ b/arch/x86/kernel/pvclock.c
27623@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27624 reset_hung_task_detector();
27625 }
27626
27627-static atomic64_t last_value = ATOMIC64_INIT(0);
27628+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27629
27630 void pvclock_resume(void)
27631 {
27632- atomic64_set(&last_value, 0);
27633+ atomic64_set_unchecked(&last_value, 0);
27634 }
27635
27636 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27637@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27638 * updating at the same time, and one of them could be slightly behind,
27639 * making the assumption that last_value always go forward fail to hold.
27640 */
27641- last = atomic64_read(&last_value);
27642+ last = atomic64_read_unchecked(&last_value);
27643 do {
27644 if (ret < last)
27645 return last;
27646- last = atomic64_cmpxchg(&last_value, last, ret);
27647+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27648 } while (unlikely(last != ret));
27649
27650 return ret;
27651diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27652index 17962e6..47f55db 100644
27653--- a/arch/x86/kernel/reboot.c
27654+++ b/arch/x86/kernel/reboot.c
27655@@ -69,6 +69,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27656
27657 void __noreturn machine_real_restart(unsigned int type)
27658 {
27659+
27660+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27661+ struct desc_struct *gdt;
27662+#endif
27663+
27664 local_irq_disable();
27665
27666 /*
27667@@ -96,7 +101,29 @@ void __noreturn machine_real_restart(unsigned int type)
27668
27669 /* Jump to the identity-mapped low memory code */
27670 #ifdef CONFIG_X86_32
27671- asm volatile("jmpl *%0" : :
27672+
27673+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27674+ gdt = get_cpu_gdt_table(smp_processor_id());
27675+ pax_open_kernel();
27676+#ifdef CONFIG_PAX_MEMORY_UDEREF
27677+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27678+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27679+ loadsegment(ds, __KERNEL_DS);
27680+ loadsegment(es, __KERNEL_DS);
27681+ loadsegment(ss, __KERNEL_DS);
27682+#endif
27683+#ifdef CONFIG_PAX_KERNEXEC
27684+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27685+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27686+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27687+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27688+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27689+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27690+#endif
27691+ pax_close_kernel();
27692+#endif
27693+
27694+ asm volatile("ljmpl *%0" : :
27695 "rm" (real_mode_header->machine_real_restart_asm),
27696 "a" (type));
27697 #else
27698@@ -500,7 +527,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27699 * This means that this function can never return, it can misbehave
27700 * by not rebooting properly and hanging.
27701 */
27702-static void native_machine_emergency_restart(void)
27703+static void __noreturn native_machine_emergency_restart(void)
27704 {
27705 int i;
27706 int attempt = 0;
27707@@ -620,13 +647,13 @@ void native_machine_shutdown(void)
27708 #endif
27709 }
27710
27711-static void __machine_emergency_restart(int emergency)
27712+static void __noreturn __machine_emergency_restart(int emergency)
27713 {
27714 reboot_emergency = emergency;
27715 machine_ops.emergency_restart();
27716 }
27717
27718-static void native_machine_restart(char *__unused)
27719+static void __noreturn native_machine_restart(char *__unused)
27720 {
27721 pr_notice("machine restart\n");
27722
27723@@ -635,7 +662,7 @@ static void native_machine_restart(char *__unused)
27724 __machine_emergency_restart(0);
27725 }
27726
27727-static void native_machine_halt(void)
27728+static void __noreturn native_machine_halt(void)
27729 {
27730 /* Stop other cpus and apics */
27731 machine_shutdown();
27732@@ -645,7 +672,7 @@ static void native_machine_halt(void)
27733 stop_this_cpu(NULL);
27734 }
27735
27736-static void native_machine_power_off(void)
27737+static void __noreturn native_machine_power_off(void)
27738 {
27739 if (pm_power_off) {
27740 if (!reboot_force)
27741@@ -654,9 +681,10 @@ static void native_machine_power_off(void)
27742 }
27743 /* A fallback in case there is no PM info available */
27744 tboot_shutdown(TB_SHUTDOWN_HALT);
27745+ unreachable();
27746 }
27747
27748-struct machine_ops machine_ops = {
27749+struct machine_ops machine_ops __read_only = {
27750 .power_off = native_machine_power_off,
27751 .shutdown = native_machine_shutdown,
27752 .emergency_restart = native_machine_emergency_restart,
27753diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27754index c8e41e9..64049ef 100644
27755--- a/arch/x86/kernel/reboot_fixups_32.c
27756+++ b/arch/x86/kernel/reboot_fixups_32.c
27757@@ -57,7 +57,7 @@ struct device_fixup {
27758 unsigned int vendor;
27759 unsigned int device;
27760 void (*reboot_fixup)(struct pci_dev *);
27761-};
27762+} __do_const;
27763
27764 /*
27765 * PCI ids solely used for fixups_table go here
27766diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27767index 3fd2c69..a444264 100644
27768--- a/arch/x86/kernel/relocate_kernel_64.S
27769+++ b/arch/x86/kernel/relocate_kernel_64.S
27770@@ -96,8 +96,7 @@ relocate_kernel:
27771
27772 /* jump to identity mapped page */
27773 addq $(identity_mapped - relocate_kernel), %r8
27774- pushq %r8
27775- ret
27776+ jmp *%r8
27777
27778 identity_mapped:
27779 /* set return address to 0 if not preserving context */
27780diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27781index 41ead8d..7ccde23 100644
27782--- a/arch/x86/kernel/setup.c
27783+++ b/arch/x86/kernel/setup.c
27784@@ -110,6 +110,7 @@
27785 #include <asm/mce.h>
27786 #include <asm/alternative.h>
27787 #include <asm/prom.h>
27788+#include <asm/boot.h>
27789
27790 /*
27791 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27792@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27793 #endif
27794
27795
27796-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27797-__visible unsigned long mmu_cr4_features;
27798+#ifdef CONFIG_X86_64
27799+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27800+#elif defined(CONFIG_X86_PAE)
27801+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27802 #else
27803-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27804+__visible unsigned long mmu_cr4_features __read_only;
27805 #endif
27806
27807+void set_in_cr4(unsigned long mask)
27808+{
27809+ unsigned long cr4 = read_cr4();
27810+
27811+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27812+ return;
27813+
27814+ pax_open_kernel();
27815+ mmu_cr4_features |= mask;
27816+ pax_close_kernel();
27817+
27818+ if (trampoline_cr4_features)
27819+ *trampoline_cr4_features = mmu_cr4_features;
27820+ cr4 |= mask;
27821+ write_cr4(cr4);
27822+}
27823+EXPORT_SYMBOL(set_in_cr4);
27824+
27825+void clear_in_cr4(unsigned long mask)
27826+{
27827+ unsigned long cr4 = read_cr4();
27828+
27829+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27830+ return;
27831+
27832+ pax_open_kernel();
27833+ mmu_cr4_features &= ~mask;
27834+ pax_close_kernel();
27835+
27836+ if (trampoline_cr4_features)
27837+ *trampoline_cr4_features = mmu_cr4_features;
27838+ cr4 &= ~mask;
27839+ write_cr4(cr4);
27840+}
27841+EXPORT_SYMBOL(clear_in_cr4);
27842+
27843 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27844 int bootloader_type, bootloader_version;
27845
27846@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27847 * area (640->1Mb) as ram even though it is not.
27848 * take them out.
27849 */
27850- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27851+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27852
27853 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27854 }
27855@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27856 /* called before trim_bios_range() to spare extra sanitize */
27857 static void __init e820_add_kernel_range(void)
27858 {
27859- u64 start = __pa_symbol(_text);
27860+ u64 start = __pa_symbol(ktla_ktva(_text));
27861 u64 size = __pa_symbol(_end) - start;
27862
27863 /*
27864@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27865
27866 void __init setup_arch(char **cmdline_p)
27867 {
27868+#ifdef CONFIG_X86_32
27869+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27870+#else
27871 memblock_reserve(__pa_symbol(_text),
27872 (unsigned long)__bss_stop - (unsigned long)_text);
27873+#endif
27874
27875 early_reserve_initrd();
27876
27877@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27878
27879 if (!boot_params.hdr.root_flags)
27880 root_mountflags &= ~MS_RDONLY;
27881- init_mm.start_code = (unsigned long) _text;
27882- init_mm.end_code = (unsigned long) _etext;
27883+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27884+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27885 init_mm.end_data = (unsigned long) _edata;
27886 init_mm.brk = _brk_end;
27887
27888- code_resource.start = __pa_symbol(_text);
27889- code_resource.end = __pa_symbol(_etext)-1;
27890- data_resource.start = __pa_symbol(_etext);
27891+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27892+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27893+ data_resource.start = __pa_symbol(_sdata);
27894 data_resource.end = __pa_symbol(_edata)-1;
27895 bss_resource.start = __pa_symbol(__bss_start);
27896 bss_resource.end = __pa_symbol(__bss_stop)-1;
27897diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27898index 5cdff03..80fa283 100644
27899--- a/arch/x86/kernel/setup_percpu.c
27900+++ b/arch/x86/kernel/setup_percpu.c
27901@@ -21,19 +21,17 @@
27902 #include <asm/cpu.h>
27903 #include <asm/stackprotector.h>
27904
27905-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27906+#ifdef CONFIG_SMP
27907+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27908 EXPORT_PER_CPU_SYMBOL(cpu_number);
27909+#endif
27910
27911-#ifdef CONFIG_X86_64
27912 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27913-#else
27914-#define BOOT_PERCPU_OFFSET 0
27915-#endif
27916
27917 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27918 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27919
27920-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27921+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27922 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27923 };
27924 EXPORT_SYMBOL(__per_cpu_offset);
27925@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27926 {
27927 #ifdef CONFIG_NEED_MULTIPLE_NODES
27928 pg_data_t *last = NULL;
27929- unsigned int cpu;
27930+ int cpu;
27931
27932 for_each_possible_cpu(cpu) {
27933 int node = early_cpu_to_node(cpu);
27934@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27935 {
27936 #ifdef CONFIG_X86_32
27937 struct desc_struct gdt;
27938+ unsigned long base = per_cpu_offset(cpu);
27939
27940- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27941- 0x2 | DESCTYPE_S, 0x8);
27942- gdt.s = 1;
27943+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27944+ 0x83 | DESCTYPE_S, 0xC);
27945 write_gdt_entry(get_cpu_gdt_table(cpu),
27946 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27947 #endif
27948@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27949 /* alrighty, percpu areas up and running */
27950 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27951 for_each_possible_cpu(cpu) {
27952+#ifdef CONFIG_CC_STACKPROTECTOR
27953+#ifdef CONFIG_X86_32
27954+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27955+#endif
27956+#endif
27957 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27958 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27959 per_cpu(cpu_number, cpu) = cpu;
27960@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27961 */
27962 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27963 #endif
27964+#ifdef CONFIG_CC_STACKPROTECTOR
27965+#ifdef CONFIG_X86_32
27966+ if (!cpu)
27967+ per_cpu(stack_canary.canary, cpu) = canary;
27968+#endif
27969+#endif
27970 /*
27971 * Up to this point, the boot CPU has been using .init.data
27972 * area. Reload any changed state for the boot CPU.
27973diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27974index 2851d63..83bf567 100644
27975--- a/arch/x86/kernel/signal.c
27976+++ b/arch/x86/kernel/signal.c
27977@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27978 * Align the stack pointer according to the i386 ABI,
27979 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27980 */
27981- sp = ((sp + 4) & -16ul) - 4;
27982+ sp = ((sp - 12) & -16ul) - 4;
27983 #else /* !CONFIG_X86_32 */
27984 sp = round_down(sp, 16) - 8;
27985 #endif
27986@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27987 }
27988
27989 if (current->mm->context.vdso)
27990- restorer = current->mm->context.vdso +
27991- selected_vdso32->sym___kernel_sigreturn;
27992+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27993 else
27994- restorer = &frame->retcode;
27995+ restorer = (void __user *)&frame->retcode;
27996 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27997 restorer = ksig->ka.sa.sa_restorer;
27998
27999@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
28000 * reasons and because gdb uses it as a signature to notice
28001 * signal handler stack frames.
28002 */
28003- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
28004+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
28005
28006 if (err)
28007 return -EFAULT;
28008@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
28009 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
28010
28011 /* Set up to return from userspace. */
28012- restorer = current->mm->context.vdso +
28013- selected_vdso32->sym___kernel_rt_sigreturn;
28014+ if (current->mm->context.vdso)
28015+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
28016+ else
28017+ restorer = (void __user *)&frame->retcode;
28018 if (ksig->ka.sa.sa_flags & SA_RESTORER)
28019 restorer = ksig->ka.sa.sa_restorer;
28020 put_user_ex(restorer, &frame->pretcode);
28021@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
28022 * reasons and because gdb uses it as a signature to notice
28023 * signal handler stack frames.
28024 */
28025- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
28026+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
28027 } put_user_catch(err);
28028
28029 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
28030@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28031 {
28032 int usig = signr_convert(ksig->sig);
28033 sigset_t *set = sigmask_to_save();
28034- compat_sigset_t *cset = (compat_sigset_t *) set;
28035+ sigset_t sigcopy;
28036+ compat_sigset_t *cset;
28037+
28038+ sigcopy = *set;
28039+
28040+ cset = (compat_sigset_t *) &sigcopy;
28041
28042 /* Set up the stack frame */
28043 if (is_ia32_frame()) {
28044@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28045 } else if (is_x32_frame()) {
28046 return x32_setup_rt_frame(ksig, cset, regs);
28047 } else {
28048- return __setup_rt_frame(ksig->sig, ksig, set, regs);
28049+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
28050 }
28051 }
28052
28053diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
28054index be8e1bd..a3d93fa 100644
28055--- a/arch/x86/kernel/smp.c
28056+++ b/arch/x86/kernel/smp.c
28057@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
28058
28059 __setup("nonmi_ipi", nonmi_ipi_setup);
28060
28061-struct smp_ops smp_ops = {
28062+struct smp_ops smp_ops __read_only = {
28063 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
28064 .smp_prepare_cpus = native_smp_prepare_cpus,
28065 .smp_cpus_done = native_smp_cpus_done,
28066diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
28067index 42a2dca..35a07aa 100644
28068--- a/arch/x86/kernel/smpboot.c
28069+++ b/arch/x86/kernel/smpboot.c
28070@@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
28071
28072 enable_start_cpu0 = 0;
28073
28074-#ifdef CONFIG_X86_32
28075+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
28076+ barrier();
28077+
28078 /* switch away from the initial page table */
28079+#ifdef CONFIG_PAX_PER_CPU_PGD
28080+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
28081+#else
28082 load_cr3(swapper_pg_dir);
28083+#endif
28084 __flush_tlb_all();
28085-#endif
28086
28087- /* otherwise gcc will move up smp_processor_id before the cpu_init */
28088- barrier();
28089 /*
28090 * Check TSC synchronization with the BP:
28091 */
28092@@ -760,8 +763,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28093 alternatives_enable_smp();
28094
28095 idle->thread.sp = (unsigned long) (((struct pt_regs *)
28096- (THREAD_SIZE + task_stack_page(idle))) - 1);
28097+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
28098 per_cpu(current_task, cpu) = idle;
28099+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28100
28101 #ifdef CONFIG_X86_32
28102 /* Stack for startup_32 can be just as for start_secondary onwards */
28103@@ -770,10 +774,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28104 clear_tsk_thread_flag(idle, TIF_FORK);
28105 initial_gs = per_cpu_offset(cpu);
28106 #endif
28107- per_cpu(kernel_stack, cpu) =
28108- (unsigned long)task_stack_page(idle) -
28109- KERNEL_STACK_OFFSET + THREAD_SIZE;
28110+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28111+ pax_open_kernel();
28112 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
28113+ pax_close_kernel();
28114 initial_code = (unsigned long)start_secondary;
28115 stack_start = idle->thread.sp;
28116
28117@@ -919,6 +923,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
28118 /* the FPU context is blank, nobody can own it */
28119 __cpu_disable_lazy_restore(cpu);
28120
28121+#ifdef CONFIG_PAX_PER_CPU_PGD
28122+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
28123+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28124+ KERNEL_PGD_PTRS);
28125+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
28126+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28127+ KERNEL_PGD_PTRS);
28128+#endif
28129+
28130 err = do_boot_cpu(apicid, cpu, tidle);
28131 if (err) {
28132 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
28133diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
28134index 9b4d51d..5d28b58 100644
28135--- a/arch/x86/kernel/step.c
28136+++ b/arch/x86/kernel/step.c
28137@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28138 struct desc_struct *desc;
28139 unsigned long base;
28140
28141- seg &= ~7UL;
28142+ seg >>= 3;
28143
28144 mutex_lock(&child->mm->context.lock);
28145- if (unlikely((seg >> 3) >= child->mm->context.size))
28146+ if (unlikely(seg >= child->mm->context.size))
28147 addr = -1L; /* bogus selector, access would fault */
28148 else {
28149 desc = child->mm->context.ldt + seg;
28150@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28151 addr += base;
28152 }
28153 mutex_unlock(&child->mm->context.lock);
28154- }
28155+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
28156+ addr = ktla_ktva(addr);
28157
28158 return addr;
28159 }
28160@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
28161 unsigned char opcode[15];
28162 unsigned long addr = convert_ip_to_linear(child, regs);
28163
28164+ if (addr == -EINVAL)
28165+ return 0;
28166+
28167 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
28168 for (i = 0; i < copied; i++) {
28169 switch (opcode[i]) {
28170diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
28171new file mode 100644
28172index 0000000..5877189
28173--- /dev/null
28174+++ b/arch/x86/kernel/sys_i386_32.c
28175@@ -0,0 +1,189 @@
28176+/*
28177+ * This file contains various random system calls that
28178+ * have a non-standard calling sequence on the Linux/i386
28179+ * platform.
28180+ */
28181+
28182+#include <linux/errno.h>
28183+#include <linux/sched.h>
28184+#include <linux/mm.h>
28185+#include <linux/fs.h>
28186+#include <linux/smp.h>
28187+#include <linux/sem.h>
28188+#include <linux/msg.h>
28189+#include <linux/shm.h>
28190+#include <linux/stat.h>
28191+#include <linux/syscalls.h>
28192+#include <linux/mman.h>
28193+#include <linux/file.h>
28194+#include <linux/utsname.h>
28195+#include <linux/ipc.h>
28196+#include <linux/elf.h>
28197+
28198+#include <linux/uaccess.h>
28199+#include <linux/unistd.h>
28200+
28201+#include <asm/syscalls.h>
28202+
28203+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
28204+{
28205+ unsigned long pax_task_size = TASK_SIZE;
28206+
28207+#ifdef CONFIG_PAX_SEGMEXEC
28208+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
28209+ pax_task_size = SEGMEXEC_TASK_SIZE;
28210+#endif
28211+
28212+ if (flags & MAP_FIXED)
28213+ if (len > pax_task_size || addr > pax_task_size - len)
28214+ return -EINVAL;
28215+
28216+ return 0;
28217+}
28218+
28219+/*
28220+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
28221+ */
28222+static unsigned long get_align_mask(void)
28223+{
28224+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
28225+ return 0;
28226+
28227+ if (!(current->flags & PF_RANDOMIZE))
28228+ return 0;
28229+
28230+ return va_align.mask;
28231+}
28232+
28233+unsigned long
28234+arch_get_unmapped_area(struct file *filp, unsigned long addr,
28235+ unsigned long len, unsigned long pgoff, unsigned long flags)
28236+{
28237+ struct mm_struct *mm = current->mm;
28238+ struct vm_area_struct *vma;
28239+ unsigned long pax_task_size = TASK_SIZE;
28240+ struct vm_unmapped_area_info info;
28241+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28242+
28243+#ifdef CONFIG_PAX_SEGMEXEC
28244+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28245+ pax_task_size = SEGMEXEC_TASK_SIZE;
28246+#endif
28247+
28248+ pax_task_size -= PAGE_SIZE;
28249+
28250+ if (len > pax_task_size)
28251+ return -ENOMEM;
28252+
28253+ if (flags & MAP_FIXED)
28254+ return addr;
28255+
28256+#ifdef CONFIG_PAX_RANDMMAP
28257+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28258+#endif
28259+
28260+ if (addr) {
28261+ addr = PAGE_ALIGN(addr);
28262+ if (pax_task_size - len >= addr) {
28263+ vma = find_vma(mm, addr);
28264+ if (check_heap_stack_gap(vma, addr, len, offset))
28265+ return addr;
28266+ }
28267+ }
28268+
28269+ info.flags = 0;
28270+ info.length = len;
28271+ info.align_mask = filp ? get_align_mask() : 0;
28272+ info.align_offset = pgoff << PAGE_SHIFT;
28273+ info.threadstack_offset = offset;
28274+
28275+#ifdef CONFIG_PAX_PAGEEXEC
28276+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
28277+ info.low_limit = 0x00110000UL;
28278+ info.high_limit = mm->start_code;
28279+
28280+#ifdef CONFIG_PAX_RANDMMAP
28281+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28282+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
28283+#endif
28284+
28285+ if (info.low_limit < info.high_limit) {
28286+ addr = vm_unmapped_area(&info);
28287+ if (!IS_ERR_VALUE(addr))
28288+ return addr;
28289+ }
28290+ } else
28291+#endif
28292+
28293+ info.low_limit = mm->mmap_base;
28294+ info.high_limit = pax_task_size;
28295+
28296+ return vm_unmapped_area(&info);
28297+}
28298+
28299+unsigned long
28300+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28301+ const unsigned long len, const unsigned long pgoff,
28302+ const unsigned long flags)
28303+{
28304+ struct vm_area_struct *vma;
28305+ struct mm_struct *mm = current->mm;
28306+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
28307+ struct vm_unmapped_area_info info;
28308+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28309+
28310+#ifdef CONFIG_PAX_SEGMEXEC
28311+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28312+ pax_task_size = SEGMEXEC_TASK_SIZE;
28313+#endif
28314+
28315+ pax_task_size -= PAGE_SIZE;
28316+
28317+ /* requested length too big for entire address space */
28318+ if (len > pax_task_size)
28319+ return -ENOMEM;
28320+
28321+ if (flags & MAP_FIXED)
28322+ return addr;
28323+
28324+#ifdef CONFIG_PAX_PAGEEXEC
28325+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
28326+ goto bottomup;
28327+#endif
28328+
28329+#ifdef CONFIG_PAX_RANDMMAP
28330+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28331+#endif
28332+
28333+ /* requesting a specific address */
28334+ if (addr) {
28335+ addr = PAGE_ALIGN(addr);
28336+ if (pax_task_size - len >= addr) {
28337+ vma = find_vma(mm, addr);
28338+ if (check_heap_stack_gap(vma, addr, len, offset))
28339+ return addr;
28340+ }
28341+ }
28342+
28343+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
28344+ info.length = len;
28345+ info.low_limit = PAGE_SIZE;
28346+ info.high_limit = mm->mmap_base;
28347+ info.align_mask = filp ? get_align_mask() : 0;
28348+ info.align_offset = pgoff << PAGE_SHIFT;
28349+ info.threadstack_offset = offset;
28350+
28351+ addr = vm_unmapped_area(&info);
28352+ if (!(addr & ~PAGE_MASK))
28353+ return addr;
28354+ VM_BUG_ON(addr != -ENOMEM);
28355+
28356+bottomup:
28357+ /*
28358+ * A failed mmap() very likely causes application failure,
28359+ * so fall back to the bottom-up function here. This scenario
28360+ * can happen with large stack limits and large mmap()
28361+ * allocations.
28362+ */
28363+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
28364+}
28365diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
28366index 30277e2..5664a29 100644
28367--- a/arch/x86/kernel/sys_x86_64.c
28368+++ b/arch/x86/kernel/sys_x86_64.c
28369@@ -81,8 +81,8 @@ out:
28370 return error;
28371 }
28372
28373-static void find_start_end(unsigned long flags, unsigned long *begin,
28374- unsigned long *end)
28375+static void find_start_end(struct mm_struct *mm, unsigned long flags,
28376+ unsigned long *begin, unsigned long *end)
28377 {
28378 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
28379 unsigned long new_begin;
28380@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
28381 *begin = new_begin;
28382 }
28383 } else {
28384- *begin = current->mm->mmap_legacy_base;
28385+ *begin = mm->mmap_legacy_base;
28386 *end = TASK_SIZE;
28387 }
28388 }
28389@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28390 struct vm_area_struct *vma;
28391 struct vm_unmapped_area_info info;
28392 unsigned long begin, end;
28393+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28394
28395 if (flags & MAP_FIXED)
28396 return addr;
28397
28398- find_start_end(flags, &begin, &end);
28399+ find_start_end(mm, flags, &begin, &end);
28400
28401 if (len > end)
28402 return -ENOMEM;
28403
28404+#ifdef CONFIG_PAX_RANDMMAP
28405+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28406+#endif
28407+
28408 if (addr) {
28409 addr = PAGE_ALIGN(addr);
28410 vma = find_vma(mm, addr);
28411- if (end - len >= addr &&
28412- (!vma || addr + len <= vma->vm_start))
28413+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28414 return addr;
28415 }
28416
28417@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28418 info.high_limit = end;
28419 info.align_mask = filp ? get_align_mask() : 0;
28420 info.align_offset = pgoff << PAGE_SHIFT;
28421+ info.threadstack_offset = offset;
28422 return vm_unmapped_area(&info);
28423 }
28424
28425@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28426 struct mm_struct *mm = current->mm;
28427 unsigned long addr = addr0;
28428 struct vm_unmapped_area_info info;
28429+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28430
28431 /* requested length too big for entire address space */
28432 if (len > TASK_SIZE)
28433@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28434 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
28435 goto bottomup;
28436
28437+#ifdef CONFIG_PAX_RANDMMAP
28438+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28439+#endif
28440+
28441 /* requesting a specific address */
28442 if (addr) {
28443 addr = PAGE_ALIGN(addr);
28444 vma = find_vma(mm, addr);
28445- if (TASK_SIZE - len >= addr &&
28446- (!vma || addr + len <= vma->vm_start))
28447+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28448 return addr;
28449 }
28450
28451@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28452 info.high_limit = mm->mmap_base;
28453 info.align_mask = filp ? get_align_mask() : 0;
28454 info.align_offset = pgoff << PAGE_SHIFT;
28455+ info.threadstack_offset = offset;
28456 addr = vm_unmapped_area(&info);
28457 if (!(addr & ~PAGE_MASK))
28458 return addr;
28459diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
28460index 91a4496..bb87552 100644
28461--- a/arch/x86/kernel/tboot.c
28462+++ b/arch/x86/kernel/tboot.c
28463@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
28464
28465 void tboot_shutdown(u32 shutdown_type)
28466 {
28467- void (*shutdown)(void);
28468+ void (* __noreturn shutdown)(void);
28469
28470 if (!tboot_enabled())
28471 return;
28472@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
28473
28474 switch_to_tboot_pt();
28475
28476- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
28477+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
28478 shutdown();
28479
28480 /* should not reach here */
28481@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
28482 return -ENODEV;
28483 }
28484
28485-static atomic_t ap_wfs_count;
28486+static atomic_unchecked_t ap_wfs_count;
28487
28488 static int tboot_wait_for_aps(int num_aps)
28489 {
28490@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
28491 {
28492 switch (action) {
28493 case CPU_DYING:
28494- atomic_inc(&ap_wfs_count);
28495+ atomic_inc_unchecked(&ap_wfs_count);
28496 if (num_online_cpus() == 1)
28497- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28498+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28499 return NOTIFY_BAD;
28500 break;
28501 }
28502@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
28503
28504 tboot_create_trampoline();
28505
28506- atomic_set(&ap_wfs_count, 0);
28507+ atomic_set_unchecked(&ap_wfs_count, 0);
28508 register_hotcpu_notifier(&tboot_cpu_notifier);
28509
28510 #ifdef CONFIG_DEBUG_FS
28511diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28512index 0fa2960..91eabbe 100644
28513--- a/arch/x86/kernel/time.c
28514+++ b/arch/x86/kernel/time.c
28515@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
28516 {
28517 unsigned long pc = instruction_pointer(regs);
28518
28519- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
28520+ if (!user_mode(regs) && in_lock_functions(pc)) {
28521 #ifdef CONFIG_FRAME_POINTER
28522- return *(unsigned long *)(regs->bp + sizeof(long));
28523+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28524 #else
28525 unsigned long *sp =
28526 (unsigned long *)kernel_stack_pointer(regs);
28527@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28528 * or above a saved flags. Eflags has bits 22-31 zero,
28529 * kernel addresses don't.
28530 */
28531+
28532+#ifdef CONFIG_PAX_KERNEXEC
28533+ return ktla_ktva(sp[0]);
28534+#else
28535 if (sp[0] >> 22)
28536 return sp[0];
28537 if (sp[1] >> 22)
28538 return sp[1];
28539 #endif
28540+
28541+#endif
28542 }
28543 return pc;
28544 }
28545diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28546index f7fec09..9991981 100644
28547--- a/arch/x86/kernel/tls.c
28548+++ b/arch/x86/kernel/tls.c
28549@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28550 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28551 return -EINVAL;
28552
28553+#ifdef CONFIG_PAX_SEGMEXEC
28554+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28555+ return -EINVAL;
28556+#endif
28557+
28558 set_tls_desc(p, idx, &info, 1);
28559
28560 return 0;
28561@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28562
28563 if (kbuf)
28564 info = kbuf;
28565- else if (__copy_from_user(infobuf, ubuf, count))
28566+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28567 return -EFAULT;
28568 else
28569 info = infobuf;
28570diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28571index 1c113db..287b42e 100644
28572--- a/arch/x86/kernel/tracepoint.c
28573+++ b/arch/x86/kernel/tracepoint.c
28574@@ -9,11 +9,11 @@
28575 #include <linux/atomic.h>
28576
28577 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28578-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28579+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28580 (unsigned long) trace_idt_table };
28581
28582 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28583-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28584+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28585
28586 static int trace_irq_vector_refcount;
28587 static DEFINE_MUTEX(irq_vector_mutex);
28588diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28589index 0d0e922..0886373 100644
28590--- a/arch/x86/kernel/traps.c
28591+++ b/arch/x86/kernel/traps.c
28592@@ -67,7 +67,7 @@
28593 #include <asm/proto.h>
28594
28595 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28596-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28597+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28598 #else
28599 #include <asm/processor-flags.h>
28600 #include <asm/setup.h>
28601@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28602 #endif
28603
28604 /* Must be page-aligned because the real IDT is used in a fixmap. */
28605-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28606+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28607
28608 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28609 EXPORT_SYMBOL_GPL(used_vectors);
28610@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28611 }
28612
28613 static nokprobe_inline int
28614-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28615+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28616 struct pt_regs *regs, long error_code)
28617 {
28618 #ifdef CONFIG_X86_32
28619- if (regs->flags & X86_VM_MASK) {
28620+ if (v8086_mode(regs)) {
28621 /*
28622 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28623 * On nmi (interrupt 2), do_trap should not be called.
28624@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28625 return -1;
28626 }
28627 #endif
28628- if (!user_mode(regs)) {
28629+ if (!user_mode_novm(regs)) {
28630 if (!fixup_exception(regs)) {
28631 tsk->thread.error_code = error_code;
28632 tsk->thread.trap_nr = trapnr;
28633+
28634+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28635+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28636+ str = "PAX: suspicious stack segment fault";
28637+#endif
28638+
28639 die(str, regs, error_code);
28640 }
28641+
28642+#ifdef CONFIG_PAX_REFCOUNT
28643+ if (trapnr == X86_TRAP_OF)
28644+ pax_report_refcount_overflow(regs);
28645+#endif
28646+
28647 return 0;
28648 }
28649
28650@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28651 }
28652
28653 static void
28654-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28655+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28656 long error_code, siginfo_t *info)
28657 {
28658 struct task_struct *tsk = current;
28659@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28660 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28661 printk_ratelimit()) {
28662 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28663- tsk->comm, tsk->pid, str,
28664+ tsk->comm, task_pid_nr(tsk), str,
28665 regs->ip, regs->sp, error_code);
28666 print_vma_addr(" in ", regs->ip);
28667 pr_cont("\n");
28668@@ -266,6 +278,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28669 tsk->thread.error_code = error_code;
28670 tsk->thread.trap_nr = X86_TRAP_DF;
28671
28672+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28673+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28674+ die("grsec: kernel stack overflow detected", regs, error_code);
28675+#endif
28676+
28677 #ifdef CONFIG_DOUBLEFAULT
28678 df_debug(regs, error_code);
28679 #endif
28680@@ -288,7 +305,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28681 conditional_sti(regs);
28682
28683 #ifdef CONFIG_X86_32
28684- if (regs->flags & X86_VM_MASK) {
28685+ if (v8086_mode(regs)) {
28686 local_irq_enable();
28687 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28688 goto exit;
28689@@ -296,18 +313,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28690 #endif
28691
28692 tsk = current;
28693- if (!user_mode(regs)) {
28694+ if (!user_mode_novm(regs)) {
28695 if (fixup_exception(regs))
28696 goto exit;
28697
28698 tsk->thread.error_code = error_code;
28699 tsk->thread.trap_nr = X86_TRAP_GP;
28700 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28701- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28702+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28703+
28704+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28705+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28706+ die("PAX: suspicious general protection fault", regs, error_code);
28707+ else
28708+#endif
28709+
28710 die("general protection fault", regs, error_code);
28711+ }
28712 goto exit;
28713 }
28714
28715+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28716+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28717+ struct mm_struct *mm = tsk->mm;
28718+ unsigned long limit;
28719+
28720+ down_write(&mm->mmap_sem);
28721+ limit = mm->context.user_cs_limit;
28722+ if (limit < TASK_SIZE) {
28723+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28724+ up_write(&mm->mmap_sem);
28725+ return;
28726+ }
28727+ up_write(&mm->mmap_sem);
28728+ }
28729+#endif
28730+
28731 tsk->thread.error_code = error_code;
28732 tsk->thread.trap_nr = X86_TRAP_GP;
28733
28734@@ -481,7 +522,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28735 /* It's safe to allow irq's after DR6 has been saved */
28736 preempt_conditional_sti(regs);
28737
28738- if (regs->flags & X86_VM_MASK) {
28739+ if (v8086_mode(regs)) {
28740 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28741 X86_TRAP_DB);
28742 preempt_conditional_cli(regs);
28743@@ -496,7 +537,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28744 * We already checked v86 mode above, so we can check for kernel mode
28745 * by just checking the CPL of CS.
28746 */
28747- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28748+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28749 tsk->thread.debugreg6 &= ~DR_STEP;
28750 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28751 regs->flags &= ~X86_EFLAGS_TF;
28752@@ -529,7 +570,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28753 return;
28754 conditional_sti(regs);
28755
28756- if (!user_mode_vm(regs))
28757+ if (!user_mode(regs))
28758 {
28759 if (!fixup_exception(regs)) {
28760 task->thread.error_code = error_code;
28761diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28762index b6025f9..0cc6a1d 100644
28763--- a/arch/x86/kernel/tsc.c
28764+++ b/arch/x86/kernel/tsc.c
28765@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28766 */
28767 smp_wmb();
28768
28769- ACCESS_ONCE(c2n->head) = data;
28770+ ACCESS_ONCE_RW(c2n->head) = data;
28771 }
28772
28773 /*
28774diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28775index 5d1cbfe..2a21feb 100644
28776--- a/arch/x86/kernel/uprobes.c
28777+++ b/arch/x86/kernel/uprobes.c
28778@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28779 int ret = NOTIFY_DONE;
28780
28781 /* We are only interested in userspace traps */
28782- if (regs && !user_mode_vm(regs))
28783+ if (regs && !user_mode(regs))
28784 return NOTIFY_DONE;
28785
28786 switch (val) {
28787@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28788
28789 if (nleft != rasize) {
28790 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28791- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28792+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28793
28794 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28795 }
28796diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28797index b9242ba..50c5edd 100644
28798--- a/arch/x86/kernel/verify_cpu.S
28799+++ b/arch/x86/kernel/verify_cpu.S
28800@@ -20,6 +20,7 @@
28801 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28802 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28803 * arch/x86/kernel/head_32.S: processor startup
28804+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28805 *
28806 * verify_cpu, returns the status of longmode and SSE in register %eax.
28807 * 0: Success 1: Failure
28808diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28809index e8edcf5..27f9344 100644
28810--- a/arch/x86/kernel/vm86_32.c
28811+++ b/arch/x86/kernel/vm86_32.c
28812@@ -44,6 +44,7 @@
28813 #include <linux/ptrace.h>
28814 #include <linux/audit.h>
28815 #include <linux/stddef.h>
28816+#include <linux/grsecurity.h>
28817
28818 #include <asm/uaccess.h>
28819 #include <asm/io.h>
28820@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28821 do_exit(SIGSEGV);
28822 }
28823
28824- tss = &per_cpu(init_tss, get_cpu());
28825+ tss = init_tss + get_cpu();
28826 current->thread.sp0 = current->thread.saved_sp0;
28827 current->thread.sysenter_cs = __KERNEL_CS;
28828 load_sp0(tss, &current->thread);
28829@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28830
28831 if (tsk->thread.saved_sp0)
28832 return -EPERM;
28833+
28834+#ifdef CONFIG_GRKERNSEC_VM86
28835+ if (!capable(CAP_SYS_RAWIO)) {
28836+ gr_handle_vm86();
28837+ return -EPERM;
28838+ }
28839+#endif
28840+
28841 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28842 offsetof(struct kernel_vm86_struct, vm86plus) -
28843 sizeof(info.regs));
28844@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28845 int tmp;
28846 struct vm86plus_struct __user *v86;
28847
28848+#ifdef CONFIG_GRKERNSEC_VM86
28849+ if (!capable(CAP_SYS_RAWIO)) {
28850+ gr_handle_vm86();
28851+ return -EPERM;
28852+ }
28853+#endif
28854+
28855 tsk = current;
28856 switch (cmd) {
28857 case VM86_REQUEST_IRQ:
28858@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28859 tsk->thread.saved_fs = info->regs32->fs;
28860 tsk->thread.saved_gs = get_user_gs(info->regs32);
28861
28862- tss = &per_cpu(init_tss, get_cpu());
28863+ tss = init_tss + get_cpu();
28864 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28865 if (cpu_has_sep)
28866 tsk->thread.sysenter_cs = 0;
28867@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28868 goto cannot_handle;
28869 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28870 goto cannot_handle;
28871- intr_ptr = (unsigned long __user *) (i << 2);
28872+ intr_ptr = (__force unsigned long __user *) (i << 2);
28873 if (get_user(segoffs, intr_ptr))
28874 goto cannot_handle;
28875 if ((segoffs >> 16) == BIOSSEG)
28876diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28877index 49edf2d..c0d1362 100644
28878--- a/arch/x86/kernel/vmlinux.lds.S
28879+++ b/arch/x86/kernel/vmlinux.lds.S
28880@@ -26,6 +26,13 @@
28881 #include <asm/page_types.h>
28882 #include <asm/cache.h>
28883 #include <asm/boot.h>
28884+#include <asm/segment.h>
28885+
28886+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28887+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28888+#else
28889+#define __KERNEL_TEXT_OFFSET 0
28890+#endif
28891
28892 #undef i386 /* in case the preprocessor is a 32bit one */
28893
28894@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28895
28896 PHDRS {
28897 text PT_LOAD FLAGS(5); /* R_E */
28898+#ifdef CONFIG_X86_32
28899+ module PT_LOAD FLAGS(5); /* R_E */
28900+#endif
28901+#ifdef CONFIG_XEN
28902+ rodata PT_LOAD FLAGS(5); /* R_E */
28903+#else
28904+ rodata PT_LOAD FLAGS(4); /* R__ */
28905+#endif
28906 data PT_LOAD FLAGS(6); /* RW_ */
28907-#ifdef CONFIG_X86_64
28908+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28909 #ifdef CONFIG_SMP
28910 percpu PT_LOAD FLAGS(6); /* RW_ */
28911 #endif
28912+ text.init PT_LOAD FLAGS(5); /* R_E */
28913+ text.exit PT_LOAD FLAGS(5); /* R_E */
28914 init PT_LOAD FLAGS(7); /* RWE */
28915-#endif
28916 note PT_NOTE FLAGS(0); /* ___ */
28917 }
28918
28919 SECTIONS
28920 {
28921 #ifdef CONFIG_X86_32
28922- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28923- phys_startup_32 = startup_32 - LOAD_OFFSET;
28924+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28925 #else
28926- . = __START_KERNEL;
28927- phys_startup_64 = startup_64 - LOAD_OFFSET;
28928+ . = __START_KERNEL;
28929 #endif
28930
28931 /* Text and read-only data */
28932- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28933- _text = .;
28934+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28935 /* bootstrapping code */
28936+#ifdef CONFIG_X86_32
28937+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28938+#else
28939+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28940+#endif
28941+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28942+ _text = .;
28943 HEAD_TEXT
28944 . = ALIGN(8);
28945 _stext = .;
28946@@ -104,13 +124,47 @@ SECTIONS
28947 IRQENTRY_TEXT
28948 *(.fixup)
28949 *(.gnu.warning)
28950- /* End of text section */
28951- _etext = .;
28952 } :text = 0x9090
28953
28954- NOTES :text :note
28955+ . += __KERNEL_TEXT_OFFSET;
28956
28957- EXCEPTION_TABLE(16) :text = 0x9090
28958+#ifdef CONFIG_X86_32
28959+ . = ALIGN(PAGE_SIZE);
28960+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28961+
28962+#ifdef CONFIG_PAX_KERNEXEC
28963+ MODULES_EXEC_VADDR = .;
28964+ BYTE(0)
28965+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28966+ . = ALIGN(HPAGE_SIZE) - 1;
28967+ MODULES_EXEC_END = .;
28968+#endif
28969+
28970+ } :module
28971+#endif
28972+
28973+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28974+ /* End of text section */
28975+ BYTE(0)
28976+ _etext = . - __KERNEL_TEXT_OFFSET;
28977+ }
28978+
28979+#ifdef CONFIG_X86_32
28980+ . = ALIGN(PAGE_SIZE);
28981+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28982+ . = ALIGN(PAGE_SIZE);
28983+ *(.empty_zero_page)
28984+ *(.initial_pg_fixmap)
28985+ *(.initial_pg_pmd)
28986+ *(.initial_page_table)
28987+ *(.swapper_pg_dir)
28988+ } :rodata
28989+#endif
28990+
28991+ . = ALIGN(PAGE_SIZE);
28992+ NOTES :rodata :note
28993+
28994+ EXCEPTION_TABLE(16) :rodata
28995
28996 #if defined(CONFIG_DEBUG_RODATA)
28997 /* .text should occupy whole number of pages */
28998@@ -122,16 +176,20 @@ SECTIONS
28999
29000 /* Data */
29001 .data : AT(ADDR(.data) - LOAD_OFFSET) {
29002+
29003+#ifdef CONFIG_PAX_KERNEXEC
29004+ . = ALIGN(HPAGE_SIZE);
29005+#else
29006+ . = ALIGN(PAGE_SIZE);
29007+#endif
29008+
29009 /* Start of data section */
29010 _sdata = .;
29011
29012 /* init_task */
29013 INIT_TASK_DATA(THREAD_SIZE)
29014
29015-#ifdef CONFIG_X86_32
29016- /* 32 bit has nosave before _edata */
29017 NOSAVE_DATA
29018-#endif
29019
29020 PAGE_ALIGNED_DATA(PAGE_SIZE)
29021
29022@@ -174,12 +232,19 @@ SECTIONS
29023 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
29024
29025 /* Init code and data - will be freed after init */
29026- . = ALIGN(PAGE_SIZE);
29027 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
29028+ BYTE(0)
29029+
29030+#ifdef CONFIG_PAX_KERNEXEC
29031+ . = ALIGN(HPAGE_SIZE);
29032+#else
29033+ . = ALIGN(PAGE_SIZE);
29034+#endif
29035+
29036 __init_begin = .; /* paired with __init_end */
29037- }
29038+ } :init.begin
29039
29040-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
29041+#ifdef CONFIG_SMP
29042 /*
29043 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
29044 * output PHDR, so the next output section - .init.text - should
29045@@ -188,12 +253,27 @@ SECTIONS
29046 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
29047 #endif
29048
29049- INIT_TEXT_SECTION(PAGE_SIZE)
29050-#ifdef CONFIG_X86_64
29051- :init
29052-#endif
29053+ . = ALIGN(PAGE_SIZE);
29054+ init_begin = .;
29055+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
29056+ VMLINUX_SYMBOL(_sinittext) = .;
29057+ INIT_TEXT
29058+ VMLINUX_SYMBOL(_einittext) = .;
29059+ . = ALIGN(PAGE_SIZE);
29060+ } :text.init
29061
29062- INIT_DATA_SECTION(16)
29063+ /*
29064+ * .exit.text is discard at runtime, not link time, to deal with
29065+ * references from .altinstructions and .eh_frame
29066+ */
29067+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
29068+ EXIT_TEXT
29069+ . = ALIGN(16);
29070+ } :text.exit
29071+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
29072+
29073+ . = ALIGN(PAGE_SIZE);
29074+ INIT_DATA_SECTION(16) :init
29075
29076 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
29077 __x86_cpu_dev_start = .;
29078@@ -264,19 +344,12 @@ SECTIONS
29079 }
29080
29081 . = ALIGN(8);
29082- /*
29083- * .exit.text is discard at runtime, not link time, to deal with
29084- * references from .altinstructions and .eh_frame
29085- */
29086- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
29087- EXIT_TEXT
29088- }
29089
29090 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
29091 EXIT_DATA
29092 }
29093
29094-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
29095+#ifndef CONFIG_SMP
29096 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
29097 #endif
29098
29099@@ -295,16 +368,10 @@ SECTIONS
29100 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
29101 __smp_locks = .;
29102 *(.smp_locks)
29103- . = ALIGN(PAGE_SIZE);
29104 __smp_locks_end = .;
29105+ . = ALIGN(PAGE_SIZE);
29106 }
29107
29108-#ifdef CONFIG_X86_64
29109- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
29110- NOSAVE_DATA
29111- }
29112-#endif
29113-
29114 /* BSS */
29115 . = ALIGN(PAGE_SIZE);
29116 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
29117@@ -320,6 +387,7 @@ SECTIONS
29118 __brk_base = .;
29119 . += 64 * 1024; /* 64k alignment slop space */
29120 *(.brk_reservation) /* areas brk users have reserved */
29121+ . = ALIGN(HPAGE_SIZE);
29122 __brk_limit = .;
29123 }
29124
29125@@ -346,13 +414,12 @@ SECTIONS
29126 * for the boot processor.
29127 */
29128 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
29129-INIT_PER_CPU(gdt_page);
29130 INIT_PER_CPU(irq_stack_union);
29131
29132 /*
29133 * Build-time check on the image size:
29134 */
29135-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
29136+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
29137 "kernel image bigger than KERNEL_IMAGE_SIZE");
29138
29139 #ifdef CONFIG_SMP
29140diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
29141index e1e1e80..1400089 100644
29142--- a/arch/x86/kernel/vsyscall_64.c
29143+++ b/arch/x86/kernel/vsyscall_64.c
29144@@ -54,15 +54,13 @@
29145
29146 DEFINE_VVAR(int, vgetcpu_mode);
29147
29148-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
29149+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
29150
29151 static int __init vsyscall_setup(char *str)
29152 {
29153 if (str) {
29154 if (!strcmp("emulate", str))
29155 vsyscall_mode = EMULATE;
29156- else if (!strcmp("native", str))
29157- vsyscall_mode = NATIVE;
29158 else if (!strcmp("none", str))
29159 vsyscall_mode = NONE;
29160 else
29161@@ -279,8 +277,7 @@ do_ret:
29162 return true;
29163
29164 sigsegv:
29165- force_sig(SIGSEGV, current);
29166- return true;
29167+ do_group_exit(SIGKILL);
29168 }
29169
29170 /*
29171@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
29172 extern char __vsyscall_page;
29173 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
29174
29175- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
29176- vsyscall_mode == NATIVE
29177- ? PAGE_KERNEL_VSYSCALL
29178- : PAGE_KERNEL_VVAR);
29179+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
29180 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
29181 (unsigned long)VSYSCALL_ADDR);
29182 }
29183diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
29184index 04068192..4d75aa6 100644
29185--- a/arch/x86/kernel/x8664_ksyms_64.c
29186+++ b/arch/x86/kernel/x8664_ksyms_64.c
29187@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
29188 EXPORT_SYMBOL(copy_user_generic_unrolled);
29189 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
29190 EXPORT_SYMBOL(__copy_user_nocache);
29191-EXPORT_SYMBOL(_copy_from_user);
29192-EXPORT_SYMBOL(_copy_to_user);
29193
29194 EXPORT_SYMBOL(copy_page);
29195 EXPORT_SYMBOL(clear_page);
29196@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
29197 EXPORT_SYMBOL(___preempt_schedule_context);
29198 #endif
29199 #endif
29200+
29201+#ifdef CONFIG_PAX_PER_CPU_PGD
29202+EXPORT_SYMBOL(cpu_pgd);
29203+#endif
29204diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
29205index e48b674..a451dd9 100644
29206--- a/arch/x86/kernel/x86_init.c
29207+++ b/arch/x86/kernel/x86_init.c
29208@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
29209 static void default_nmi_init(void) { };
29210 static int default_i8042_detect(void) { return 1; };
29211
29212-struct x86_platform_ops x86_platform = {
29213+struct x86_platform_ops x86_platform __read_only = {
29214 .calibrate_tsc = native_calibrate_tsc,
29215 .get_wallclock = mach_get_cmos_time,
29216 .set_wallclock = mach_set_rtc_mmss,
29217@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
29218 EXPORT_SYMBOL_GPL(x86_platform);
29219
29220 #if defined(CONFIG_PCI_MSI)
29221-struct x86_msi_ops x86_msi = {
29222+struct x86_msi_ops x86_msi __read_only = {
29223 .setup_msi_irqs = native_setup_msi_irqs,
29224 .compose_msi_msg = native_compose_msi_msg,
29225 .teardown_msi_irq = native_teardown_msi_irq,
29226@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
29227 }
29228 #endif
29229
29230-struct x86_io_apic_ops x86_io_apic_ops = {
29231+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
29232 .init = native_io_apic_init_mappings,
29233 .read = native_io_apic_read,
29234 .write = native_io_apic_write,
29235diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
29236index 940b142..0ad3a10 100644
29237--- a/arch/x86/kernel/xsave.c
29238+++ b/arch/x86/kernel/xsave.c
29239@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29240
29241 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
29242 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
29243- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29244+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29245
29246 if (!use_xsave())
29247 return err;
29248
29249- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
29250+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
29251
29252 /*
29253 * Read the xstate_bv which we copied (directly from the cpu or
29254 * from the state in task struct) to the user buffers.
29255 */
29256- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29257+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29258
29259 /*
29260 * For legacy compatible, we always set FP/SSE bits in the bit
29261@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29262 */
29263 xstate_bv |= XSTATE_FPSSE;
29264
29265- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29266+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29267
29268 return err;
29269 }
29270@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
29271 {
29272 int err;
29273
29274+ buf = (struct xsave_struct __user *)____m(buf);
29275 if (use_xsave())
29276 err = xsave_user(buf);
29277 else if (use_fxsr())
29278@@ -314,6 +315,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
29279 */
29280 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
29281 {
29282+ buf = (void __user *)____m(buf);
29283 if (use_xsave()) {
29284 if ((unsigned long)buf % 64 || fx_only) {
29285 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
29286diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
29287index 38a0afe..94421a9 100644
29288--- a/arch/x86/kvm/cpuid.c
29289+++ b/arch/x86/kvm/cpuid.c
29290@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
29291 struct kvm_cpuid2 *cpuid,
29292 struct kvm_cpuid_entry2 __user *entries)
29293 {
29294- int r;
29295+ int r, i;
29296
29297 r = -E2BIG;
29298 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
29299 goto out;
29300 r = -EFAULT;
29301- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
29302- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29303+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29304 goto out;
29305+ for (i = 0; i < cpuid->nent; ++i) {
29306+ struct kvm_cpuid_entry2 cpuid_entry;
29307+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
29308+ goto out;
29309+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
29310+ }
29311 vcpu->arch.cpuid_nent = cpuid->nent;
29312 kvm_apic_set_version(vcpu);
29313 kvm_x86_ops->cpuid_update(vcpu);
29314@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
29315 struct kvm_cpuid2 *cpuid,
29316 struct kvm_cpuid_entry2 __user *entries)
29317 {
29318- int r;
29319+ int r, i;
29320
29321 r = -E2BIG;
29322 if (cpuid->nent < vcpu->arch.cpuid_nent)
29323 goto out;
29324 r = -EFAULT;
29325- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
29326- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29327+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29328 goto out;
29329+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
29330+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
29331+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
29332+ goto out;
29333+ }
29334 return 0;
29335
29336 out:
29337diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
29338index 03954f7..0f4ad73 100644
29339--- a/arch/x86/kvm/emulate.c
29340+++ b/arch/x86/kvm/emulate.c
29341@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
29342 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
29343 }
29344
29345-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
29346-{
29347- register_address_increment(ctxt, &ctxt->_eip, rel);
29348-}
29349-
29350 static u32 desc_limit_scaled(struct desc_struct *desc)
29351 {
29352 u32 limit = get_desc_limit(desc);
29353@@ -568,6 +563,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
29354 return emulate_exception(ctxt, NM_VECTOR, 0, false);
29355 }
29356
29357+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
29358+ int cs_l)
29359+{
29360+ switch (ctxt->op_bytes) {
29361+ case 2:
29362+ ctxt->_eip = (u16)dst;
29363+ break;
29364+ case 4:
29365+ ctxt->_eip = (u32)dst;
29366+ break;
29367+#ifdef CONFIG_X86_64
29368+ case 8:
29369+ if ((cs_l && is_noncanonical_address(dst)) ||
29370+ (!cs_l && (dst >> 32) != 0))
29371+ return emulate_gp(ctxt, 0);
29372+ ctxt->_eip = dst;
29373+ break;
29374+#endif
29375+ default:
29376+ WARN(1, "unsupported eip assignment size\n");
29377+ }
29378+ return X86EMUL_CONTINUE;
29379+}
29380+
29381+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
29382+{
29383+ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
29384+}
29385+
29386+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
29387+{
29388+ return assign_eip_near(ctxt, ctxt->_eip + rel);
29389+}
29390+
29391 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
29392 {
29393 u16 selector;
29394@@ -750,8 +779,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
29395 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
29396 unsigned size)
29397 {
29398- if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
29399- return __do_insn_fetch_bytes(ctxt, size);
29400+ unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
29401+
29402+ if (unlikely(done_size < size))
29403+ return __do_insn_fetch_bytes(ctxt, size - done_size);
29404 else
29405 return X86EMUL_CONTINUE;
29406 }
29407@@ -1415,7 +1446,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29408
29409 /* Does not support long mode */
29410 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29411- u16 selector, int seg, u8 cpl, bool in_task_switch)
29412+ u16 selector, int seg, u8 cpl,
29413+ bool in_task_switch,
29414+ struct desc_struct *desc)
29415 {
29416 struct desc_struct seg_desc, old_desc;
29417 u8 dpl, rpl;
29418@@ -1547,6 +1580,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29419 }
29420 load:
29421 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
29422+ if (desc)
29423+ *desc = seg_desc;
29424 return X86EMUL_CONTINUE;
29425 exception:
29426 emulate_exception(ctxt, err_vec, err_code, true);
29427@@ -1557,7 +1592,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
29428 u16 selector, int seg)
29429 {
29430 u8 cpl = ctxt->ops->cpl(ctxt);
29431- return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
29432+ return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
29433 }
29434
29435 static void write_register_operand(struct operand *op)
29436@@ -1951,17 +1986,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
29437 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
29438 {
29439 int rc;
29440- unsigned short sel;
29441+ unsigned short sel, old_sel;
29442+ struct desc_struct old_desc, new_desc;
29443+ const struct x86_emulate_ops *ops = ctxt->ops;
29444+ u8 cpl = ctxt->ops->cpl(ctxt);
29445+
29446+ /* Assignment of RIP may only fail in 64-bit mode */
29447+ if (ctxt->mode == X86EMUL_MODE_PROT64)
29448+ ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
29449+ VCPU_SREG_CS);
29450
29451 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
29452
29453- rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
29454+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
29455+ &new_desc);
29456 if (rc != X86EMUL_CONTINUE)
29457 return rc;
29458
29459- ctxt->_eip = 0;
29460- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
29461- return X86EMUL_CONTINUE;
29462+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
29463+ if (rc != X86EMUL_CONTINUE) {
29464+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
29465+ /* assigning eip failed; restore the old cs */
29466+ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
29467+ return rc;
29468+ }
29469+ return rc;
29470 }
29471
29472 static int em_grp45(struct x86_emulate_ctxt *ctxt)
29473@@ -1972,13 +2021,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
29474 case 2: /* call near abs */ {
29475 long int old_eip;
29476 old_eip = ctxt->_eip;
29477- ctxt->_eip = ctxt->src.val;
29478+ rc = assign_eip_near(ctxt, ctxt->src.val);
29479+ if (rc != X86EMUL_CONTINUE)
29480+ break;
29481 ctxt->src.val = old_eip;
29482 rc = em_push(ctxt);
29483 break;
29484 }
29485 case 4: /* jmp abs */
29486- ctxt->_eip = ctxt->src.val;
29487+ rc = assign_eip_near(ctxt, ctxt->src.val);
29488 break;
29489 case 5: /* jmp far */
29490 rc = em_jmp_far(ctxt);
29491@@ -2013,30 +2064,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
29492
29493 static int em_ret(struct x86_emulate_ctxt *ctxt)
29494 {
29495- ctxt->dst.type = OP_REG;
29496- ctxt->dst.addr.reg = &ctxt->_eip;
29497- ctxt->dst.bytes = ctxt->op_bytes;
29498- return em_pop(ctxt);
29499+ int rc;
29500+ unsigned long eip;
29501+
29502+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29503+ if (rc != X86EMUL_CONTINUE)
29504+ return rc;
29505+
29506+ return assign_eip_near(ctxt, eip);
29507 }
29508
29509 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
29510 {
29511 int rc;
29512- unsigned long cs;
29513+ unsigned long eip, cs;
29514+ u16 old_cs;
29515 int cpl = ctxt->ops->cpl(ctxt);
29516+ struct desc_struct old_desc, new_desc;
29517+ const struct x86_emulate_ops *ops = ctxt->ops;
29518
29519- rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
29520+ if (ctxt->mode == X86EMUL_MODE_PROT64)
29521+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
29522+ VCPU_SREG_CS);
29523+
29524+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29525 if (rc != X86EMUL_CONTINUE)
29526 return rc;
29527- if (ctxt->op_bytes == 4)
29528- ctxt->_eip = (u32)ctxt->_eip;
29529 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
29530 if (rc != X86EMUL_CONTINUE)
29531 return rc;
29532 /* Outer-privilege level return is not implemented */
29533 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
29534 return X86EMUL_UNHANDLEABLE;
29535- rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
29536+ rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
29537+ &new_desc);
29538+ if (rc != X86EMUL_CONTINUE)
29539+ return rc;
29540+ rc = assign_eip_far(ctxt, eip, new_desc.l);
29541+ if (rc != X86EMUL_CONTINUE) {
29542+ WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
29543+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
29544+ }
29545 return rc;
29546 }
29547
29548@@ -2297,7 +2365,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29549 {
29550 const struct x86_emulate_ops *ops = ctxt->ops;
29551 struct desc_struct cs, ss;
29552- u64 msr_data;
29553+ u64 msr_data, rcx, rdx;
29554 int usermode;
29555 u16 cs_sel = 0, ss_sel = 0;
29556
29557@@ -2313,6 +2381,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29558 else
29559 usermode = X86EMUL_MODE_PROT32;
29560
29561+ rcx = reg_read(ctxt, VCPU_REGS_RCX);
29562+ rdx = reg_read(ctxt, VCPU_REGS_RDX);
29563+
29564 cs.dpl = 3;
29565 ss.dpl = 3;
29566 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
29567@@ -2330,6 +2401,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29568 ss_sel = cs_sel + 8;
29569 cs.d = 0;
29570 cs.l = 1;
29571+ if (is_noncanonical_address(rcx) ||
29572+ is_noncanonical_address(rdx))
29573+ return emulate_gp(ctxt, 0);
29574 break;
29575 }
29576 cs_sel |= SELECTOR_RPL_MASK;
29577@@ -2338,8 +2412,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
29578 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
29579 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
29580
29581- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
29582- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
29583+ ctxt->_eip = rdx;
29584+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
29585
29586 return X86EMUL_CONTINUE;
29587 }
29588@@ -2457,19 +2531,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
29589 * Now load segment descriptors. If fault happens at this stage
29590 * it is handled in a context of new task
29591 */
29592- ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
29593+ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
29594+ true, NULL);
29595 if (ret != X86EMUL_CONTINUE)
29596 return ret;
29597- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
29598+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
29599+ true, NULL);
29600 if (ret != X86EMUL_CONTINUE)
29601 return ret;
29602- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
29603+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
29604+ true, NULL);
29605 if (ret != X86EMUL_CONTINUE)
29606 return ret;
29607- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
29608+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
29609+ true, NULL);
29610 if (ret != X86EMUL_CONTINUE)
29611 return ret;
29612- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
29613+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
29614+ true, NULL);
29615 if (ret != X86EMUL_CONTINUE)
29616 return ret;
29617
29618@@ -2594,25 +2673,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
29619 * Now load segment descriptors. If fault happenes at this stage
29620 * it is handled in a context of new task
29621 */
29622- ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
29623+ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
29624+ cpl, true, NULL);
29625 if (ret != X86EMUL_CONTINUE)
29626 return ret;
29627- ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
29628+ ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
29629+ true, NULL);
29630 if (ret != X86EMUL_CONTINUE)
29631 return ret;
29632- ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
29633+ ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
29634+ true, NULL);
29635 if (ret != X86EMUL_CONTINUE)
29636 return ret;
29637- ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
29638+ ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
29639+ true, NULL);
29640 if (ret != X86EMUL_CONTINUE)
29641 return ret;
29642- ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
29643+ ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
29644+ true, NULL);
29645 if (ret != X86EMUL_CONTINUE)
29646 return ret;
29647- ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
29648+ ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
29649+ true, NULL);
29650 if (ret != X86EMUL_CONTINUE)
29651 return ret;
29652- ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
29653+ ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
29654+ true, NULL);
29655 if (ret != X86EMUL_CONTINUE)
29656 return ret;
29657
29658@@ -2880,10 +2966,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
29659
29660 static int em_call(struct x86_emulate_ctxt *ctxt)
29661 {
29662+ int rc;
29663 long rel = ctxt->src.val;
29664
29665 ctxt->src.val = (unsigned long)ctxt->_eip;
29666- jmp_rel(ctxt, rel);
29667+ rc = jmp_rel(ctxt, rel);
29668+ if (rc != X86EMUL_CONTINUE)
29669+ return rc;
29670 return em_push(ctxt);
29671 }
29672
29673@@ -2892,34 +2981,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
29674 u16 sel, old_cs;
29675 ulong old_eip;
29676 int rc;
29677+ struct desc_struct old_desc, new_desc;
29678+ const struct x86_emulate_ops *ops = ctxt->ops;
29679+ int cpl = ctxt->ops->cpl(ctxt);
29680
29681- old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
29682 old_eip = ctxt->_eip;
29683+ ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
29684
29685 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
29686- if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
29687+ rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
29688+ &new_desc);
29689+ if (rc != X86EMUL_CONTINUE)
29690 return X86EMUL_CONTINUE;
29691
29692- ctxt->_eip = 0;
29693- memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
29694+ rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
29695+ if (rc != X86EMUL_CONTINUE)
29696+ goto fail;
29697
29698 ctxt->src.val = old_cs;
29699 rc = em_push(ctxt);
29700 if (rc != X86EMUL_CONTINUE)
29701- return rc;
29702+ goto fail;
29703
29704 ctxt->src.val = old_eip;
29705- return em_push(ctxt);
29706+ rc = em_push(ctxt);
29707+ /* If we failed, we tainted the memory, but the very least we should
29708+ restore cs */
29709+ if (rc != X86EMUL_CONTINUE)
29710+ goto fail;
29711+ return rc;
29712+fail:
29713+ ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
29714+ return rc;
29715+
29716 }
29717
29718 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
29719 {
29720 int rc;
29721+ unsigned long eip;
29722
29723- ctxt->dst.type = OP_REG;
29724- ctxt->dst.addr.reg = &ctxt->_eip;
29725- ctxt->dst.bytes = ctxt->op_bytes;
29726- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
29727+ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
29728+ if (rc != X86EMUL_CONTINUE)
29729+ return rc;
29730+ rc = assign_eip_near(ctxt, eip);
29731 if (rc != X86EMUL_CONTINUE)
29732 return rc;
29733 rsp_increment(ctxt, ctxt->src.val);
29734@@ -3250,20 +3355,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
29735
29736 static int em_loop(struct x86_emulate_ctxt *ctxt)
29737 {
29738+ int rc = X86EMUL_CONTINUE;
29739+
29740 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
29741 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
29742 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
29743- jmp_rel(ctxt, ctxt->src.val);
29744+ rc = jmp_rel(ctxt, ctxt->src.val);
29745
29746- return X86EMUL_CONTINUE;
29747+ return rc;
29748 }
29749
29750 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
29751 {
29752+ int rc = X86EMUL_CONTINUE;
29753+
29754 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
29755- jmp_rel(ctxt, ctxt->src.val);
29756+ rc = jmp_rel(ctxt, ctxt->src.val);
29757
29758- return X86EMUL_CONTINUE;
29759+ return rc;
29760 }
29761
29762 static int em_in(struct x86_emulate_ctxt *ctxt)
29763@@ -3351,6 +3460,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
29764 return X86EMUL_CONTINUE;
29765 }
29766
29767+static int em_clflush(struct x86_emulate_ctxt *ctxt)
29768+{
29769+ /* emulating clflush regardless of cpuid */
29770+ return X86EMUL_CONTINUE;
29771+}
29772+
29773 static bool valid_cr(int nr)
29774 {
29775 switch (nr) {
29776@@ -3683,6 +3798,16 @@ static const struct opcode group11[] = {
29777 X7(D(Undefined)),
29778 };
29779
29780+static const struct gprefix pfx_0f_ae_7 = {
29781+ I(SrcMem | ByteOp, em_clflush), N, N, N,
29782+};
29783+
29784+static const struct group_dual group15 = { {
29785+ N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
29786+}, {
29787+ N, N, N, N, N, N, N, N,
29788+} };
29789+
29790 static const struct gprefix pfx_0f_6f_0f_7f = {
29791 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
29792 };
29793@@ -3887,10 +4012,11 @@ static const struct opcode twobyte_table[256] = {
29794 N, I(ImplicitOps | EmulateOnUD, em_syscall),
29795 II(ImplicitOps | Priv, em_clts, clts), N,
29796 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
29797- N, D(ImplicitOps | ModRM), N, N,
29798+ N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
29799 /* 0x10 - 0x1F */
29800 N, N, N, N, N, N, N, N,
29801- D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
29802+ D(ImplicitOps | ModRM | SrcMem | NoAccess),
29803+ N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
29804 /* 0x20 - 0x2F */
29805 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
29806 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
29807@@ -3942,7 +4068,7 @@ static const struct opcode twobyte_table[256] = {
29808 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
29809 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
29810 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
29811- D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
29812+ GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
29813 /* 0xB0 - 0xB7 */
29814 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
29815 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
29816@@ -4458,10 +4584,10 @@ done_prefixes:
29817 /* Decode and fetch the destination operand: register or memory. */
29818 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
29819
29820-done:
29821 if (ctxt->rip_relative)
29822 ctxt->memopp->addr.mem.ea += ctxt->_eip;
29823
29824+done:
29825 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
29826 }
29827
29828@@ -4711,7 +4837,7 @@ special_insn:
29829 break;
29830 case 0x70 ... 0x7f: /* jcc (short) */
29831 if (test_cc(ctxt->b, ctxt->eflags))
29832- jmp_rel(ctxt, ctxt->src.val);
29833+ rc = jmp_rel(ctxt, ctxt->src.val);
29834 break;
29835 case 0x8d: /* lea r16/r32, m */
29836 ctxt->dst.val = ctxt->src.addr.mem.ea;
29837@@ -4741,7 +4867,7 @@ special_insn:
29838 break;
29839 case 0xe9: /* jmp rel */
29840 case 0xeb: /* jmp rel short */
29841- jmp_rel(ctxt, ctxt->src.val);
29842+ rc = jmp_rel(ctxt, ctxt->src.val);
29843 ctxt->dst.type = OP_NONE; /* Disable writeback. */
29844 break;
29845 case 0xf4: /* hlt */
29846@@ -4864,13 +4990,11 @@ twobyte_insn:
29847 break;
29848 case 0x80 ... 0x8f: /* jnz rel, etc*/
29849 if (test_cc(ctxt->b, ctxt->eflags))
29850- jmp_rel(ctxt, ctxt->src.val);
29851+ rc = jmp_rel(ctxt, ctxt->src.val);
29852 break;
29853 case 0x90 ... 0x9f: /* setcc r/m8 */
29854 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
29855 break;
29856- case 0xae: /* clflush */
29857- break;
29858 case 0xb6 ... 0xb7: /* movzx */
29859 ctxt->dst.bytes = ctxt->op_bytes;
29860 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
29861diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
29862index 518d864..298781d 100644
29863--- a/arch/x86/kvm/i8254.c
29864+++ b/arch/x86/kvm/i8254.c
29865@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
29866 return;
29867
29868 timer = &pit->pit_state.timer;
29869+ mutex_lock(&pit->pit_state.lock);
29870 if (hrtimer_cancel(timer))
29871 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
29872+ mutex_unlock(&pit->pit_state.lock);
29873 }
29874
29875 static void destroy_pit_timer(struct kvm_pit *pit)
29876diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
29877index 08e8a89..0e9183e 100644
29878--- a/arch/x86/kvm/lapic.c
29879+++ b/arch/x86/kvm/lapic.c
29880@@ -55,7 +55,7 @@
29881 #define APIC_BUS_CYCLE_NS 1
29882
29883 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
29884-#define apic_debug(fmt, arg...)
29885+#define apic_debug(fmt, arg...) do {} while (0)
29886
29887 #define APIC_LVT_NUM 6
29888 /* 14 is the version for Xeon and Pentium 8.4.8*/
29889diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
29890index 4107765..d9eb358 100644
29891--- a/arch/x86/kvm/paging_tmpl.h
29892+++ b/arch/x86/kvm/paging_tmpl.h
29893@@ -331,7 +331,7 @@ retry_walk:
29894 if (unlikely(kvm_is_error_hva(host_addr)))
29895 goto error;
29896
29897- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
29898+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
29899 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
29900 goto error;
29901 walker->ptep_user[walker->level - 1] = ptep_user;
29902diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
29903index ddf7427..fd84599 100644
29904--- a/arch/x86/kvm/svm.c
29905+++ b/arch/x86/kvm/svm.c
29906@@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
29907 msr.host_initiated = false;
29908
29909 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
29910- if (svm_set_msr(&svm->vcpu, &msr)) {
29911+ if (kvm_set_msr(&svm->vcpu, &msr)) {
29912 trace_kvm_msr_write_ex(ecx, data);
29913 kvm_inject_gp(&svm->vcpu, 0);
29914 } else {
29915@@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
29916
29917 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
29918 || !svm_exit_handlers[exit_code]) {
29919- kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
29920- kvm_run->hw.hardware_exit_reason = exit_code;
29921- return 0;
29922+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
29923+ kvm_queue_exception(vcpu, UD_VECTOR);
29924+ return 1;
29925 }
29926
29927 return svm_exit_handlers[exit_code](svm);
29928@@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
29929 int cpu = raw_smp_processor_id();
29930
29931 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
29932+
29933+ pax_open_kernel();
29934 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
29935+ pax_close_kernel();
29936+
29937 load_TR_desc();
29938 }
29939
29940@@ -3948,6 +3952,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
29941 #endif
29942 #endif
29943
29944+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29945+ __set_fs(current_thread_info()->addr_limit);
29946+#endif
29947+
29948 reload_tss(vcpu);
29949
29950 local_irq_disable();
29951diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
29952index 6a118fa..c0b3c00 100644
29953--- a/arch/x86/kvm/vmx.c
29954+++ b/arch/x86/kvm/vmx.c
29955@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
29956 #endif
29957 }
29958
29959-static void vmcs_clear_bits(unsigned long field, u32 mask)
29960+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
29961 {
29962 vmcs_writel(field, vmcs_readl(field) & ~mask);
29963 }
29964
29965-static void vmcs_set_bits(unsigned long field, u32 mask)
29966+static void vmcs_set_bits(unsigned long field, unsigned long mask)
29967 {
29968 vmcs_writel(field, vmcs_readl(field) | mask);
29969 }
29970@@ -1606,7 +1606,11 @@ static void reload_tss(void)
29971 struct desc_struct *descs;
29972
29973 descs = (void *)gdt->address;
29974+
29975+ pax_open_kernel();
29976 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
29977+ pax_close_kernel();
29978+
29979 load_TR_desc();
29980 }
29981
29982@@ -1834,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
29983 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
29984 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
29985
29986+#ifdef CONFIG_PAX_PER_CPU_PGD
29987+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29988+#endif
29989+
29990 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
29991 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
29992 vmx->loaded_vmcs->cpu = cpu;
29993@@ -2123,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
29994 * reads and returns guest's timestamp counter "register"
29995 * guest_tsc = host_tsc + tsc_offset -- 21.3
29996 */
29997-static u64 guest_read_tsc(void)
29998+static u64 __intentional_overflow(-1) guest_read_tsc(void)
29999 {
30000 u64 host_tsc, tsc_offset;
30001
30002@@ -2632,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
30003 default:
30004 msr = find_msr_entry(vmx, msr_index);
30005 if (msr) {
30006+ u64 old_msr_data = msr->data;
30007 msr->data = data;
30008 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
30009 preempt_disable();
30010- kvm_set_shared_msr(msr->index, msr->data,
30011- msr->mask);
30012+ ret = kvm_set_shared_msr(msr->index, msr->data,
30013+ msr->mask);
30014 preempt_enable();
30015+ if (ret)
30016+ msr->data = old_msr_data;
30017 }
30018 break;
30019 }
30020@@ -3111,8 +3122,11 @@ static __init int hardware_setup(void)
30021 if (!cpu_has_vmx_flexpriority())
30022 flexpriority_enabled = 0;
30023
30024- if (!cpu_has_vmx_tpr_shadow())
30025- kvm_x86_ops->update_cr8_intercept = NULL;
30026+ if (!cpu_has_vmx_tpr_shadow()) {
30027+ pax_open_kernel();
30028+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
30029+ pax_close_kernel();
30030+ }
30031
30032 if (enable_ept && !cpu_has_vmx_ept_2m_page())
30033 kvm_disable_largepages();
30034@@ -3123,13 +3137,15 @@ static __init int hardware_setup(void)
30035 if (!cpu_has_vmx_apicv())
30036 enable_apicv = 0;
30037
30038+ pax_open_kernel();
30039 if (enable_apicv)
30040- kvm_x86_ops->update_cr8_intercept = NULL;
30041+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
30042 else {
30043- kvm_x86_ops->hwapic_irr_update = NULL;
30044- kvm_x86_ops->deliver_posted_interrupt = NULL;
30045- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
30046+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
30047+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
30048+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
30049 }
30050+ pax_close_kernel();
30051
30052 if (nested)
30053 nested_vmx_setup_ctls_msrs();
30054@@ -4239,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
30055 unsigned long cr4;
30056
30057 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
30058+
30059+#ifndef CONFIG_PAX_PER_CPU_PGD
30060 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
30061+#endif
30062
30063 /* Save the most likely value for this task's CR4 in the VMCS. */
30064 cr4 = read_cr4();
30065@@ -4266,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
30066 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
30067 vmx->host_idt_base = dt.address;
30068
30069- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
30070+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
30071
30072 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
30073 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
30074@@ -5263,7 +5282,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
30075 msr.data = data;
30076 msr.index = ecx;
30077 msr.host_initiated = false;
30078- if (vmx_set_msr(vcpu, &msr) != 0) {
30079+ if (kvm_set_msr(vcpu, &msr) != 0) {
30080 trace_kvm_msr_write_ex(ecx, data);
30081 kvm_inject_gp(vcpu, 0);
30082 return 1;
30083@@ -6636,6 +6655,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
30084 return 1;
30085 }
30086
30087+static int handle_invvpid(struct kvm_vcpu *vcpu)
30088+{
30089+ kvm_queue_exception(vcpu, UD_VECTOR);
30090+ return 1;
30091+}
30092+
30093 /*
30094 * The exit handlers return 1 if the exit was handled fully and guest execution
30095 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
30096@@ -6681,6 +6706,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
30097 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
30098 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
30099 [EXIT_REASON_INVEPT] = handle_invept,
30100+ [EXIT_REASON_INVVPID] = handle_invvpid,
30101 };
30102
30103 static const int kvm_vmx_max_exit_handlers =
30104@@ -6914,7 +6940,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
30105 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
30106 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
30107 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
30108- case EXIT_REASON_INVEPT:
30109+ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
30110 /*
30111 * VMX instructions trap unconditionally. This allows L1 to
30112 * emulate them for its L2 guest, i.e., allows 3-level nesting!
30113@@ -7055,10 +7081,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
30114 && kvm_vmx_exit_handlers[exit_reason])
30115 return kvm_vmx_exit_handlers[exit_reason](vcpu);
30116 else {
30117- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
30118- vcpu->run->hw.hardware_exit_reason = exit_reason;
30119+ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
30120+ kvm_queue_exception(vcpu, UD_VECTOR);
30121+ return 1;
30122 }
30123- return 0;
30124 }
30125
30126 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
30127@@ -7465,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30128 "jmp 2f \n\t"
30129 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
30130 "2: "
30131+
30132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30133+ "ljmp %[cs],$3f\n\t"
30134+ "3: "
30135+#endif
30136+
30137 /* Save guest registers, load host registers, keep flags */
30138 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
30139 "pop %0 \n\t"
30140@@ -7517,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30141 #endif
30142 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
30143 [wordsize]"i"(sizeof(ulong))
30144+
30145+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30146+ ,[cs]"i"(__KERNEL_CS)
30147+#endif
30148+
30149 : "cc", "memory"
30150 #ifdef CONFIG_X86_64
30151 , "rax", "rbx", "rdi", "rsi"
30152@@ -7530,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30153 if (debugctlmsr)
30154 update_debugctlmsr(debugctlmsr);
30155
30156-#ifndef CONFIG_X86_64
30157+#ifdef CONFIG_X86_32
30158 /*
30159 * The sysexit path does not restore ds/es, so we must set them to
30160 * a reasonable value ourselves.
30161@@ -7539,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
30162 * may be executed in interrupt context, which saves and restore segments
30163 * around it, nullifying its effect.
30164 */
30165- loadsegment(ds, __USER_DS);
30166- loadsegment(es, __USER_DS);
30167+ loadsegment(ds, __KERNEL_DS);
30168+ loadsegment(es, __KERNEL_DS);
30169+ loadsegment(ss, __KERNEL_DS);
30170+
30171+#ifdef CONFIG_PAX_KERNEXEC
30172+ loadsegment(fs, __KERNEL_PERCPU);
30173+#endif
30174+
30175+#ifdef CONFIG_PAX_MEMORY_UDEREF
30176+ __set_fs(current_thread_info()->addr_limit);
30177+#endif
30178+
30179 #endif
30180
30181 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
30182diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
30183index 8f1e22d..c23d3c5 100644
30184--- a/arch/x86/kvm/x86.c
30185+++ b/arch/x86/kvm/x86.c
30186@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
30187 shared_msr_update(i, shared_msrs_global.msrs[i]);
30188 }
30189
30190-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
30191+int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
30192 {
30193 unsigned int cpu = smp_processor_id();
30194 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
30195+ int err;
30196
30197 if (((value ^ smsr->values[slot].curr) & mask) == 0)
30198- return;
30199+ return 0;
30200 smsr->values[slot].curr = value;
30201- wrmsrl(shared_msrs_global.msrs[slot], value);
30202+ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
30203+ if (err)
30204+ return 1;
30205+
30206 if (!smsr->registered) {
30207 smsr->urn.on_user_return = kvm_on_user_return;
30208 user_return_notifier_register(&smsr->urn);
30209 smsr->registered = true;
30210 }
30211+ return 0;
30212 }
30213 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
30214
30215@@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
30216 }
30217 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
30218
30219-
30220 /*
30221 * Writes msr value into into the appropriate "register".
30222 * Returns 0 on success, non-0 otherwise.
30223@@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
30224 */
30225 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
30226 {
30227+ switch (msr->index) {
30228+ case MSR_FS_BASE:
30229+ case MSR_GS_BASE:
30230+ case MSR_KERNEL_GS_BASE:
30231+ case MSR_CSTAR:
30232+ case MSR_LSTAR:
30233+ if (is_noncanonical_address(msr->data))
30234+ return 1;
30235+ break;
30236+ case MSR_IA32_SYSENTER_EIP:
30237+ case MSR_IA32_SYSENTER_ESP:
30238+ /*
30239+ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
30240+ * non-canonical address is written on Intel but not on
30241+ * AMD (which ignores the top 32-bits, because it does
30242+ * not implement 64-bit SYSENTER).
30243+ *
30244+ * 64-bit code should hence be able to write a non-canonical
30245+ * value on AMD. Making the address canonical ensures that
30246+ * vmentry does not fail on Intel after writing a non-canonical
30247+ * value, and that something deterministic happens if the guest
30248+ * invokes 64-bit SYSENTER.
30249+ */
30250+ msr->data = get_canonical(msr->data);
30251+ }
30252 return kvm_x86_ops->set_msr(vcpu, msr);
30253 }
30254+EXPORT_SYMBOL_GPL(kvm_set_msr);
30255
30256 /*
30257 * Adapt set_msr() to msr_io()'s calling convention
30258@@ -1827,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
30259 {
30260 struct kvm *kvm = vcpu->kvm;
30261 int lm = is_long_mode(vcpu);
30262- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
30263- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
30264+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
30265+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
30266 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
30267 : kvm->arch.xen_hvm_config.blob_size_32;
30268 u32 page_num = data & ~PAGE_MASK;
30269@@ -2749,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
30270 if (n < msr_list.nmsrs)
30271 goto out;
30272 r = -EFAULT;
30273+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
30274+ goto out;
30275 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
30276 num_msrs_to_save * sizeof(u32)))
30277 goto out;
30278@@ -5609,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
30279 };
30280 #endif
30281
30282-int kvm_arch_init(void *opaque)
30283+int kvm_arch_init(const void *opaque)
30284 {
30285 int r;
30286 struct kvm_x86_ops *ops = opaque;
30287diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
30288index aae9413..d11e829 100644
30289--- a/arch/x86/lguest/boot.c
30290+++ b/arch/x86/lguest/boot.c
30291@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
30292 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
30293 * Launcher to reboot us.
30294 */
30295-static void lguest_restart(char *reason)
30296+static __noreturn void lguest_restart(char *reason)
30297 {
30298 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
30299+ BUG();
30300 }
30301
30302 /*G:050
30303diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
30304index 00933d5..3a64af9 100644
30305--- a/arch/x86/lib/atomic64_386_32.S
30306+++ b/arch/x86/lib/atomic64_386_32.S
30307@@ -48,6 +48,10 @@ BEGIN(read)
30308 movl (v), %eax
30309 movl 4(v), %edx
30310 RET_ENDP
30311+BEGIN(read_unchecked)
30312+ movl (v), %eax
30313+ movl 4(v), %edx
30314+RET_ENDP
30315 #undef v
30316
30317 #define v %esi
30318@@ -55,6 +59,10 @@ BEGIN(set)
30319 movl %ebx, (v)
30320 movl %ecx, 4(v)
30321 RET_ENDP
30322+BEGIN(set_unchecked)
30323+ movl %ebx, (v)
30324+ movl %ecx, 4(v)
30325+RET_ENDP
30326 #undef v
30327
30328 #define v %esi
30329@@ -70,6 +78,20 @@ RET_ENDP
30330 BEGIN(add)
30331 addl %eax, (v)
30332 adcl %edx, 4(v)
30333+
30334+#ifdef CONFIG_PAX_REFCOUNT
30335+ jno 0f
30336+ subl %eax, (v)
30337+ sbbl %edx, 4(v)
30338+ int $4
30339+0:
30340+ _ASM_EXTABLE(0b, 0b)
30341+#endif
30342+
30343+RET_ENDP
30344+BEGIN(add_unchecked)
30345+ addl %eax, (v)
30346+ adcl %edx, 4(v)
30347 RET_ENDP
30348 #undef v
30349
30350@@ -77,6 +99,24 @@ RET_ENDP
30351 BEGIN(add_return)
30352 addl (v), %eax
30353 adcl 4(v), %edx
30354+
30355+#ifdef CONFIG_PAX_REFCOUNT
30356+ into
30357+1234:
30358+ _ASM_EXTABLE(1234b, 2f)
30359+#endif
30360+
30361+ movl %eax, (v)
30362+ movl %edx, 4(v)
30363+
30364+#ifdef CONFIG_PAX_REFCOUNT
30365+2:
30366+#endif
30367+
30368+RET_ENDP
30369+BEGIN(add_return_unchecked)
30370+ addl (v), %eax
30371+ adcl 4(v), %edx
30372 movl %eax, (v)
30373 movl %edx, 4(v)
30374 RET_ENDP
30375@@ -86,6 +126,20 @@ RET_ENDP
30376 BEGIN(sub)
30377 subl %eax, (v)
30378 sbbl %edx, 4(v)
30379+
30380+#ifdef CONFIG_PAX_REFCOUNT
30381+ jno 0f
30382+ addl %eax, (v)
30383+ adcl %edx, 4(v)
30384+ int $4
30385+0:
30386+ _ASM_EXTABLE(0b, 0b)
30387+#endif
30388+
30389+RET_ENDP
30390+BEGIN(sub_unchecked)
30391+ subl %eax, (v)
30392+ sbbl %edx, 4(v)
30393 RET_ENDP
30394 #undef v
30395
30396@@ -96,6 +150,27 @@ BEGIN(sub_return)
30397 sbbl $0, %edx
30398 addl (v), %eax
30399 adcl 4(v), %edx
30400+
30401+#ifdef CONFIG_PAX_REFCOUNT
30402+ into
30403+1234:
30404+ _ASM_EXTABLE(1234b, 2f)
30405+#endif
30406+
30407+ movl %eax, (v)
30408+ movl %edx, 4(v)
30409+
30410+#ifdef CONFIG_PAX_REFCOUNT
30411+2:
30412+#endif
30413+
30414+RET_ENDP
30415+BEGIN(sub_return_unchecked)
30416+ negl %edx
30417+ negl %eax
30418+ sbbl $0, %edx
30419+ addl (v), %eax
30420+ adcl 4(v), %edx
30421 movl %eax, (v)
30422 movl %edx, 4(v)
30423 RET_ENDP
30424@@ -105,6 +180,20 @@ RET_ENDP
30425 BEGIN(inc)
30426 addl $1, (v)
30427 adcl $0, 4(v)
30428+
30429+#ifdef CONFIG_PAX_REFCOUNT
30430+ jno 0f
30431+ subl $1, (v)
30432+ sbbl $0, 4(v)
30433+ int $4
30434+0:
30435+ _ASM_EXTABLE(0b, 0b)
30436+#endif
30437+
30438+RET_ENDP
30439+BEGIN(inc_unchecked)
30440+ addl $1, (v)
30441+ adcl $0, 4(v)
30442 RET_ENDP
30443 #undef v
30444
30445@@ -114,6 +203,26 @@ BEGIN(inc_return)
30446 movl 4(v), %edx
30447 addl $1, %eax
30448 adcl $0, %edx
30449+
30450+#ifdef CONFIG_PAX_REFCOUNT
30451+ into
30452+1234:
30453+ _ASM_EXTABLE(1234b, 2f)
30454+#endif
30455+
30456+ movl %eax, (v)
30457+ movl %edx, 4(v)
30458+
30459+#ifdef CONFIG_PAX_REFCOUNT
30460+2:
30461+#endif
30462+
30463+RET_ENDP
30464+BEGIN(inc_return_unchecked)
30465+ movl (v), %eax
30466+ movl 4(v), %edx
30467+ addl $1, %eax
30468+ adcl $0, %edx
30469 movl %eax, (v)
30470 movl %edx, 4(v)
30471 RET_ENDP
30472@@ -123,6 +232,20 @@ RET_ENDP
30473 BEGIN(dec)
30474 subl $1, (v)
30475 sbbl $0, 4(v)
30476+
30477+#ifdef CONFIG_PAX_REFCOUNT
30478+ jno 0f
30479+ addl $1, (v)
30480+ adcl $0, 4(v)
30481+ int $4
30482+0:
30483+ _ASM_EXTABLE(0b, 0b)
30484+#endif
30485+
30486+RET_ENDP
30487+BEGIN(dec_unchecked)
30488+ subl $1, (v)
30489+ sbbl $0, 4(v)
30490 RET_ENDP
30491 #undef v
30492
30493@@ -132,6 +255,26 @@ BEGIN(dec_return)
30494 movl 4(v), %edx
30495 subl $1, %eax
30496 sbbl $0, %edx
30497+
30498+#ifdef CONFIG_PAX_REFCOUNT
30499+ into
30500+1234:
30501+ _ASM_EXTABLE(1234b, 2f)
30502+#endif
30503+
30504+ movl %eax, (v)
30505+ movl %edx, 4(v)
30506+
30507+#ifdef CONFIG_PAX_REFCOUNT
30508+2:
30509+#endif
30510+
30511+RET_ENDP
30512+BEGIN(dec_return_unchecked)
30513+ movl (v), %eax
30514+ movl 4(v), %edx
30515+ subl $1, %eax
30516+ sbbl $0, %edx
30517 movl %eax, (v)
30518 movl %edx, 4(v)
30519 RET_ENDP
30520@@ -143,6 +286,13 @@ BEGIN(add_unless)
30521 adcl %edx, %edi
30522 addl (v), %eax
30523 adcl 4(v), %edx
30524+
30525+#ifdef CONFIG_PAX_REFCOUNT
30526+ into
30527+1234:
30528+ _ASM_EXTABLE(1234b, 2f)
30529+#endif
30530+
30531 cmpl %eax, %ecx
30532 je 3f
30533 1:
30534@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
30535 1:
30536 addl $1, %eax
30537 adcl $0, %edx
30538+
30539+#ifdef CONFIG_PAX_REFCOUNT
30540+ into
30541+1234:
30542+ _ASM_EXTABLE(1234b, 2f)
30543+#endif
30544+
30545 movl %eax, (v)
30546 movl %edx, 4(v)
30547 movl $1, %eax
30548@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
30549 movl 4(v), %edx
30550 subl $1, %eax
30551 sbbl $0, %edx
30552+
30553+#ifdef CONFIG_PAX_REFCOUNT
30554+ into
30555+1234:
30556+ _ASM_EXTABLE(1234b, 1f)
30557+#endif
30558+
30559 js 1f
30560 movl %eax, (v)
30561 movl %edx, 4(v)
30562diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
30563index f5cc9eb..51fa319 100644
30564--- a/arch/x86/lib/atomic64_cx8_32.S
30565+++ b/arch/x86/lib/atomic64_cx8_32.S
30566@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
30567 CFI_STARTPROC
30568
30569 read64 %ecx
30570+ pax_force_retaddr
30571 ret
30572 CFI_ENDPROC
30573 ENDPROC(atomic64_read_cx8)
30574
30575+ENTRY(atomic64_read_unchecked_cx8)
30576+ CFI_STARTPROC
30577+
30578+ read64 %ecx
30579+ pax_force_retaddr
30580+ ret
30581+ CFI_ENDPROC
30582+ENDPROC(atomic64_read_unchecked_cx8)
30583+
30584 ENTRY(atomic64_set_cx8)
30585 CFI_STARTPROC
30586
30587@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
30588 cmpxchg8b (%esi)
30589 jne 1b
30590
30591+ pax_force_retaddr
30592 ret
30593 CFI_ENDPROC
30594 ENDPROC(atomic64_set_cx8)
30595
30596+ENTRY(atomic64_set_unchecked_cx8)
30597+ CFI_STARTPROC
30598+
30599+1:
30600+/* we don't need LOCK_PREFIX since aligned 64-bit writes
30601+ * are atomic on 586 and newer */
30602+ cmpxchg8b (%esi)
30603+ jne 1b
30604+
30605+ pax_force_retaddr
30606+ ret
30607+ CFI_ENDPROC
30608+ENDPROC(atomic64_set_unchecked_cx8)
30609+
30610 ENTRY(atomic64_xchg_cx8)
30611 CFI_STARTPROC
30612
30613@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
30614 cmpxchg8b (%esi)
30615 jne 1b
30616
30617+ pax_force_retaddr
30618 ret
30619 CFI_ENDPROC
30620 ENDPROC(atomic64_xchg_cx8)
30621
30622-.macro addsub_return func ins insc
30623-ENTRY(atomic64_\func\()_return_cx8)
30624+.macro addsub_return func ins insc unchecked=""
30625+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
30626 CFI_STARTPROC
30627 SAVE ebp
30628 SAVE ebx
30629@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
30630 movl %edx, %ecx
30631 \ins\()l %esi, %ebx
30632 \insc\()l %edi, %ecx
30633+
30634+.ifb \unchecked
30635+#ifdef CONFIG_PAX_REFCOUNT
30636+ into
30637+2:
30638+ _ASM_EXTABLE(2b, 3f)
30639+#endif
30640+.endif
30641+
30642 LOCK_PREFIX
30643 cmpxchg8b (%ebp)
30644 jne 1b
30645-
30646-10:
30647 movl %ebx, %eax
30648 movl %ecx, %edx
30649+
30650+.ifb \unchecked
30651+#ifdef CONFIG_PAX_REFCOUNT
30652+3:
30653+#endif
30654+.endif
30655+
30656 RESTORE edi
30657 RESTORE esi
30658 RESTORE ebx
30659 RESTORE ebp
30660+ pax_force_retaddr
30661 ret
30662 CFI_ENDPROC
30663-ENDPROC(atomic64_\func\()_return_cx8)
30664+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
30665 .endm
30666
30667 addsub_return add add adc
30668 addsub_return sub sub sbb
30669+addsub_return add add adc _unchecked
30670+addsub_return sub sub sbb _unchecked
30671
30672-.macro incdec_return func ins insc
30673-ENTRY(atomic64_\func\()_return_cx8)
30674+.macro incdec_return func ins insc unchecked=""
30675+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
30676 CFI_STARTPROC
30677 SAVE ebx
30678
30679@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
30680 movl %edx, %ecx
30681 \ins\()l $1, %ebx
30682 \insc\()l $0, %ecx
30683+
30684+.ifb \unchecked
30685+#ifdef CONFIG_PAX_REFCOUNT
30686+ into
30687+2:
30688+ _ASM_EXTABLE(2b, 3f)
30689+#endif
30690+.endif
30691+
30692 LOCK_PREFIX
30693 cmpxchg8b (%esi)
30694 jne 1b
30695
30696-10:
30697 movl %ebx, %eax
30698 movl %ecx, %edx
30699+
30700+.ifb \unchecked
30701+#ifdef CONFIG_PAX_REFCOUNT
30702+3:
30703+#endif
30704+.endif
30705+
30706 RESTORE ebx
30707+ pax_force_retaddr
30708 ret
30709 CFI_ENDPROC
30710-ENDPROC(atomic64_\func\()_return_cx8)
30711+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
30712 .endm
30713
30714 incdec_return inc add adc
30715 incdec_return dec sub sbb
30716+incdec_return inc add adc _unchecked
30717+incdec_return dec sub sbb _unchecked
30718
30719 ENTRY(atomic64_dec_if_positive_cx8)
30720 CFI_STARTPROC
30721@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
30722 movl %edx, %ecx
30723 subl $1, %ebx
30724 sbb $0, %ecx
30725+
30726+#ifdef CONFIG_PAX_REFCOUNT
30727+ into
30728+1234:
30729+ _ASM_EXTABLE(1234b, 2f)
30730+#endif
30731+
30732 js 2f
30733 LOCK_PREFIX
30734 cmpxchg8b (%esi)
30735@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
30736 movl %ebx, %eax
30737 movl %ecx, %edx
30738 RESTORE ebx
30739+ pax_force_retaddr
30740 ret
30741 CFI_ENDPROC
30742 ENDPROC(atomic64_dec_if_positive_cx8)
30743@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
30744 movl %edx, %ecx
30745 addl %ebp, %ebx
30746 adcl %edi, %ecx
30747+
30748+#ifdef CONFIG_PAX_REFCOUNT
30749+ into
30750+1234:
30751+ _ASM_EXTABLE(1234b, 3f)
30752+#endif
30753+
30754 LOCK_PREFIX
30755 cmpxchg8b (%esi)
30756 jne 1b
30757@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
30758 CFI_ADJUST_CFA_OFFSET -8
30759 RESTORE ebx
30760 RESTORE ebp
30761+ pax_force_retaddr
30762 ret
30763 4:
30764 cmpl %edx, 4(%esp)
30765@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
30766 xorl %ecx, %ecx
30767 addl $1, %ebx
30768 adcl %edx, %ecx
30769+
30770+#ifdef CONFIG_PAX_REFCOUNT
30771+ into
30772+1234:
30773+ _ASM_EXTABLE(1234b, 3f)
30774+#endif
30775+
30776 LOCK_PREFIX
30777 cmpxchg8b (%esi)
30778 jne 1b
30779@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
30780 movl $1, %eax
30781 3:
30782 RESTORE ebx
30783+ pax_force_retaddr
30784 ret
30785 CFI_ENDPROC
30786 ENDPROC(atomic64_inc_not_zero_cx8)
30787diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
30788index e78b8eee..7e173a8 100644
30789--- a/arch/x86/lib/checksum_32.S
30790+++ b/arch/x86/lib/checksum_32.S
30791@@ -29,7 +29,8 @@
30792 #include <asm/dwarf2.h>
30793 #include <asm/errno.h>
30794 #include <asm/asm.h>
30795-
30796+#include <asm/segment.h>
30797+
30798 /*
30799 * computes a partial checksum, e.g. for TCP/UDP fragments
30800 */
30801@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
30802
30803 #define ARGBASE 16
30804 #define FP 12
30805-
30806-ENTRY(csum_partial_copy_generic)
30807+
30808+ENTRY(csum_partial_copy_generic_to_user)
30809 CFI_STARTPROC
30810+
30811+#ifdef CONFIG_PAX_MEMORY_UDEREF
30812+ pushl_cfi %gs
30813+ popl_cfi %es
30814+ jmp csum_partial_copy_generic
30815+#endif
30816+
30817+ENTRY(csum_partial_copy_generic_from_user)
30818+
30819+#ifdef CONFIG_PAX_MEMORY_UDEREF
30820+ pushl_cfi %gs
30821+ popl_cfi %ds
30822+#endif
30823+
30824+ENTRY(csum_partial_copy_generic)
30825 subl $4,%esp
30826 CFI_ADJUST_CFA_OFFSET 4
30827 pushl_cfi %edi
30828@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
30829 jmp 4f
30830 SRC(1: movw (%esi), %bx )
30831 addl $2, %esi
30832-DST( movw %bx, (%edi) )
30833+DST( movw %bx, %es:(%edi) )
30834 addl $2, %edi
30835 addw %bx, %ax
30836 adcl $0, %eax
30837@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
30838 SRC(1: movl (%esi), %ebx )
30839 SRC( movl 4(%esi), %edx )
30840 adcl %ebx, %eax
30841-DST( movl %ebx, (%edi) )
30842+DST( movl %ebx, %es:(%edi) )
30843 adcl %edx, %eax
30844-DST( movl %edx, 4(%edi) )
30845+DST( movl %edx, %es:4(%edi) )
30846
30847 SRC( movl 8(%esi), %ebx )
30848 SRC( movl 12(%esi), %edx )
30849 adcl %ebx, %eax
30850-DST( movl %ebx, 8(%edi) )
30851+DST( movl %ebx, %es:8(%edi) )
30852 adcl %edx, %eax
30853-DST( movl %edx, 12(%edi) )
30854+DST( movl %edx, %es:12(%edi) )
30855
30856 SRC( movl 16(%esi), %ebx )
30857 SRC( movl 20(%esi), %edx )
30858 adcl %ebx, %eax
30859-DST( movl %ebx, 16(%edi) )
30860+DST( movl %ebx, %es:16(%edi) )
30861 adcl %edx, %eax
30862-DST( movl %edx, 20(%edi) )
30863+DST( movl %edx, %es:20(%edi) )
30864
30865 SRC( movl 24(%esi), %ebx )
30866 SRC( movl 28(%esi), %edx )
30867 adcl %ebx, %eax
30868-DST( movl %ebx, 24(%edi) )
30869+DST( movl %ebx, %es:24(%edi) )
30870 adcl %edx, %eax
30871-DST( movl %edx, 28(%edi) )
30872+DST( movl %edx, %es:28(%edi) )
30873
30874 lea 32(%esi), %esi
30875 lea 32(%edi), %edi
30876@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
30877 shrl $2, %edx # This clears CF
30878 SRC(3: movl (%esi), %ebx )
30879 adcl %ebx, %eax
30880-DST( movl %ebx, (%edi) )
30881+DST( movl %ebx, %es:(%edi) )
30882 lea 4(%esi), %esi
30883 lea 4(%edi), %edi
30884 dec %edx
30885@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
30886 jb 5f
30887 SRC( movw (%esi), %cx )
30888 leal 2(%esi), %esi
30889-DST( movw %cx, (%edi) )
30890+DST( movw %cx, %es:(%edi) )
30891 leal 2(%edi), %edi
30892 je 6f
30893 shll $16,%ecx
30894 SRC(5: movb (%esi), %cl )
30895-DST( movb %cl, (%edi) )
30896+DST( movb %cl, %es:(%edi) )
30897 6: addl %ecx, %eax
30898 adcl $0, %eax
30899 7:
30900@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
30901
30902 6001:
30903 movl ARGBASE+20(%esp), %ebx # src_err_ptr
30904- movl $-EFAULT, (%ebx)
30905+ movl $-EFAULT, %ss:(%ebx)
30906
30907 # zero the complete destination - computing the rest
30908 # is too much work
30909@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
30910
30911 6002:
30912 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30913- movl $-EFAULT,(%ebx)
30914+ movl $-EFAULT,%ss:(%ebx)
30915 jmp 5000b
30916
30917 .previous
30918
30919+ pushl_cfi %ss
30920+ popl_cfi %ds
30921+ pushl_cfi %ss
30922+ popl_cfi %es
30923 popl_cfi %ebx
30924 CFI_RESTORE ebx
30925 popl_cfi %esi
30926@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
30927 popl_cfi %ecx # equivalent to addl $4,%esp
30928 ret
30929 CFI_ENDPROC
30930-ENDPROC(csum_partial_copy_generic)
30931+ENDPROC(csum_partial_copy_generic_to_user)
30932
30933 #else
30934
30935 /* Version for PentiumII/PPro */
30936
30937 #define ROUND1(x) \
30938+ nop; nop; nop; \
30939 SRC(movl x(%esi), %ebx ) ; \
30940 addl %ebx, %eax ; \
30941- DST(movl %ebx, x(%edi) ) ;
30942+ DST(movl %ebx, %es:x(%edi)) ;
30943
30944 #define ROUND(x) \
30945+ nop; nop; nop; \
30946 SRC(movl x(%esi), %ebx ) ; \
30947 adcl %ebx, %eax ; \
30948- DST(movl %ebx, x(%edi) ) ;
30949+ DST(movl %ebx, %es:x(%edi)) ;
30950
30951 #define ARGBASE 12
30952-
30953-ENTRY(csum_partial_copy_generic)
30954+
30955+ENTRY(csum_partial_copy_generic_to_user)
30956 CFI_STARTPROC
30957+
30958+#ifdef CONFIG_PAX_MEMORY_UDEREF
30959+ pushl_cfi %gs
30960+ popl_cfi %es
30961+ jmp csum_partial_copy_generic
30962+#endif
30963+
30964+ENTRY(csum_partial_copy_generic_from_user)
30965+
30966+#ifdef CONFIG_PAX_MEMORY_UDEREF
30967+ pushl_cfi %gs
30968+ popl_cfi %ds
30969+#endif
30970+
30971+ENTRY(csum_partial_copy_generic)
30972 pushl_cfi %ebx
30973 CFI_REL_OFFSET ebx, 0
30974 pushl_cfi %edi
30975@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
30976 subl %ebx, %edi
30977 lea -1(%esi),%edx
30978 andl $-32,%edx
30979- lea 3f(%ebx,%ebx), %ebx
30980+ lea 3f(%ebx,%ebx,2), %ebx
30981 testl %esi, %esi
30982 jmp *%ebx
30983 1: addl $64,%esi
30984@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
30985 jb 5f
30986 SRC( movw (%esi), %dx )
30987 leal 2(%esi), %esi
30988-DST( movw %dx, (%edi) )
30989+DST( movw %dx, %es:(%edi) )
30990 leal 2(%edi), %edi
30991 je 6f
30992 shll $16,%edx
30993 5:
30994 SRC( movb (%esi), %dl )
30995-DST( movb %dl, (%edi) )
30996+DST( movb %dl, %es:(%edi) )
30997 6: addl %edx, %eax
30998 adcl $0, %eax
30999 7:
31000 .section .fixup, "ax"
31001 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
31002- movl $-EFAULT, (%ebx)
31003+ movl $-EFAULT, %ss:(%ebx)
31004 # zero the complete destination (computing the rest is too much work)
31005 movl ARGBASE+8(%esp),%edi # dst
31006 movl ARGBASE+12(%esp),%ecx # len
31007@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
31008 rep; stosb
31009 jmp 7b
31010 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
31011- movl $-EFAULT, (%ebx)
31012+ movl $-EFAULT, %ss:(%ebx)
31013 jmp 7b
31014 .previous
31015
31016+#ifdef CONFIG_PAX_MEMORY_UDEREF
31017+ pushl_cfi %ss
31018+ popl_cfi %ds
31019+ pushl_cfi %ss
31020+ popl_cfi %es
31021+#endif
31022+
31023 popl_cfi %esi
31024 CFI_RESTORE esi
31025 popl_cfi %edi
31026@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
31027 CFI_RESTORE ebx
31028 ret
31029 CFI_ENDPROC
31030-ENDPROC(csum_partial_copy_generic)
31031+ENDPROC(csum_partial_copy_generic_to_user)
31032
31033 #undef ROUND
31034 #undef ROUND1
31035diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
31036index f2145cf..cea889d 100644
31037--- a/arch/x86/lib/clear_page_64.S
31038+++ b/arch/x86/lib/clear_page_64.S
31039@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
31040 movl $4096/8,%ecx
31041 xorl %eax,%eax
31042 rep stosq
31043+ pax_force_retaddr
31044 ret
31045 CFI_ENDPROC
31046 ENDPROC(clear_page_c)
31047@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
31048 movl $4096,%ecx
31049 xorl %eax,%eax
31050 rep stosb
31051+ pax_force_retaddr
31052 ret
31053 CFI_ENDPROC
31054 ENDPROC(clear_page_c_e)
31055@@ -43,6 +45,7 @@ ENTRY(clear_page)
31056 leaq 64(%rdi),%rdi
31057 jnz .Lloop
31058 nop
31059+ pax_force_retaddr
31060 ret
31061 CFI_ENDPROC
31062 .Lclear_page_end:
31063@@ -58,7 +61,7 @@ ENDPROC(clear_page)
31064
31065 #include <asm/cpufeature.h>
31066
31067- .section .altinstr_replacement,"ax"
31068+ .section .altinstr_replacement,"a"
31069 1: .byte 0xeb /* jmp <disp8> */
31070 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
31071 2: .byte 0xeb /* jmp <disp8> */
31072diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
31073index 1e572c5..2a162cd 100644
31074--- a/arch/x86/lib/cmpxchg16b_emu.S
31075+++ b/arch/x86/lib/cmpxchg16b_emu.S
31076@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
31077
31078 popf
31079 mov $1, %al
31080+ pax_force_retaddr
31081 ret
31082
31083 not_same:
31084 popf
31085 xor %al,%al
31086+ pax_force_retaddr
31087 ret
31088
31089 CFI_ENDPROC
31090diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
31091index 176cca6..e0d658e 100644
31092--- a/arch/x86/lib/copy_page_64.S
31093+++ b/arch/x86/lib/copy_page_64.S
31094@@ -9,6 +9,7 @@ copy_page_rep:
31095 CFI_STARTPROC
31096 movl $4096/8, %ecx
31097 rep movsq
31098+ pax_force_retaddr
31099 ret
31100 CFI_ENDPROC
31101 ENDPROC(copy_page_rep)
31102@@ -24,8 +25,8 @@ ENTRY(copy_page)
31103 CFI_ADJUST_CFA_OFFSET 2*8
31104 movq %rbx, (%rsp)
31105 CFI_REL_OFFSET rbx, 0
31106- movq %r12, 1*8(%rsp)
31107- CFI_REL_OFFSET r12, 1*8
31108+ movq %r13, 1*8(%rsp)
31109+ CFI_REL_OFFSET r13, 1*8
31110
31111 movl $(4096/64)-5, %ecx
31112 .p2align 4
31113@@ -38,7 +39,7 @@ ENTRY(copy_page)
31114 movq 0x8*4(%rsi), %r9
31115 movq 0x8*5(%rsi), %r10
31116 movq 0x8*6(%rsi), %r11
31117- movq 0x8*7(%rsi), %r12
31118+ movq 0x8*7(%rsi), %r13
31119
31120 prefetcht0 5*64(%rsi)
31121
31122@@ -49,7 +50,7 @@ ENTRY(copy_page)
31123 movq %r9, 0x8*4(%rdi)
31124 movq %r10, 0x8*5(%rdi)
31125 movq %r11, 0x8*6(%rdi)
31126- movq %r12, 0x8*7(%rdi)
31127+ movq %r13, 0x8*7(%rdi)
31128
31129 leaq 64 (%rsi), %rsi
31130 leaq 64 (%rdi), %rdi
31131@@ -68,7 +69,7 @@ ENTRY(copy_page)
31132 movq 0x8*4(%rsi), %r9
31133 movq 0x8*5(%rsi), %r10
31134 movq 0x8*6(%rsi), %r11
31135- movq 0x8*7(%rsi), %r12
31136+ movq 0x8*7(%rsi), %r13
31137
31138 movq %rax, 0x8*0(%rdi)
31139 movq %rbx, 0x8*1(%rdi)
31140@@ -77,7 +78,7 @@ ENTRY(copy_page)
31141 movq %r9, 0x8*4(%rdi)
31142 movq %r10, 0x8*5(%rdi)
31143 movq %r11, 0x8*6(%rdi)
31144- movq %r12, 0x8*7(%rdi)
31145+ movq %r13, 0x8*7(%rdi)
31146
31147 leaq 64(%rdi), %rdi
31148 leaq 64(%rsi), %rsi
31149@@ -85,10 +86,11 @@ ENTRY(copy_page)
31150
31151 movq (%rsp), %rbx
31152 CFI_RESTORE rbx
31153- movq 1*8(%rsp), %r12
31154- CFI_RESTORE r12
31155+ movq 1*8(%rsp), %r13
31156+ CFI_RESTORE r13
31157 addq $2*8, %rsp
31158 CFI_ADJUST_CFA_OFFSET -2*8
31159+ pax_force_retaddr
31160 ret
31161 .Lcopy_page_end:
31162 CFI_ENDPROC
31163@@ -99,7 +101,7 @@ ENDPROC(copy_page)
31164
31165 #include <asm/cpufeature.h>
31166
31167- .section .altinstr_replacement,"ax"
31168+ .section .altinstr_replacement,"a"
31169 1: .byte 0xeb /* jmp <disp8> */
31170 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
31171 2:
31172diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
31173index dee945d..a84067b 100644
31174--- a/arch/x86/lib/copy_user_64.S
31175+++ b/arch/x86/lib/copy_user_64.S
31176@@ -18,31 +18,7 @@
31177 #include <asm/alternative-asm.h>
31178 #include <asm/asm.h>
31179 #include <asm/smap.h>
31180-
31181-/*
31182- * By placing feature2 after feature1 in altinstructions section, we logically
31183- * implement:
31184- * If CPU has feature2, jmp to alt2 is used
31185- * else if CPU has feature1, jmp to alt1 is used
31186- * else jmp to orig is used.
31187- */
31188- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
31189-0:
31190- .byte 0xe9 /* 32bit jump */
31191- .long \orig-1f /* by default jump to orig */
31192-1:
31193- .section .altinstr_replacement,"ax"
31194-2: .byte 0xe9 /* near jump with 32bit immediate */
31195- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
31196-3: .byte 0xe9 /* near jump with 32bit immediate */
31197- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
31198- .previous
31199-
31200- .section .altinstructions,"a"
31201- altinstruction_entry 0b,2b,\feature1,5,5
31202- altinstruction_entry 0b,3b,\feature2,5,5
31203- .previous
31204- .endm
31205+#include <asm/pgtable.h>
31206
31207 .macro ALIGN_DESTINATION
31208 #ifdef FIX_ALIGNMENT
31209@@ -70,52 +46,6 @@
31210 #endif
31211 .endm
31212
31213-/* Standard copy_to_user with segment limit checking */
31214-ENTRY(_copy_to_user)
31215- CFI_STARTPROC
31216- GET_THREAD_INFO(%rax)
31217- movq %rdi,%rcx
31218- addq %rdx,%rcx
31219- jc bad_to_user
31220- cmpq TI_addr_limit(%rax),%rcx
31221- ja bad_to_user
31222- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
31223- copy_user_generic_unrolled,copy_user_generic_string, \
31224- copy_user_enhanced_fast_string
31225- CFI_ENDPROC
31226-ENDPROC(_copy_to_user)
31227-
31228-/* Standard copy_from_user with segment limit checking */
31229-ENTRY(_copy_from_user)
31230- CFI_STARTPROC
31231- GET_THREAD_INFO(%rax)
31232- movq %rsi,%rcx
31233- addq %rdx,%rcx
31234- jc bad_from_user
31235- cmpq TI_addr_limit(%rax),%rcx
31236- ja bad_from_user
31237- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
31238- copy_user_generic_unrolled,copy_user_generic_string, \
31239- copy_user_enhanced_fast_string
31240- CFI_ENDPROC
31241-ENDPROC(_copy_from_user)
31242-
31243- .section .fixup,"ax"
31244- /* must zero dest */
31245-ENTRY(bad_from_user)
31246-bad_from_user:
31247- CFI_STARTPROC
31248- movl %edx,%ecx
31249- xorl %eax,%eax
31250- rep
31251- stosb
31252-bad_to_user:
31253- movl %edx,%eax
31254- ret
31255- CFI_ENDPROC
31256-ENDPROC(bad_from_user)
31257- .previous
31258-
31259 /*
31260 * copy_user_generic_unrolled - memory copy with exception handling.
31261 * This version is for CPUs like P4 that don't have efficient micro
31262@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
31263 */
31264 ENTRY(copy_user_generic_unrolled)
31265 CFI_STARTPROC
31266+ ASM_PAX_OPEN_USERLAND
31267 ASM_STAC
31268 cmpl $8,%edx
31269 jb 20f /* less then 8 bytes, go to byte copy loop */
31270@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
31271 jnz 21b
31272 23: xor %eax,%eax
31273 ASM_CLAC
31274+ ASM_PAX_CLOSE_USERLAND
31275+ pax_force_retaddr
31276 ret
31277
31278 .section .fixup,"ax"
31279@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
31280 */
31281 ENTRY(copy_user_generic_string)
31282 CFI_STARTPROC
31283+ ASM_PAX_OPEN_USERLAND
31284 ASM_STAC
31285 cmpl $8,%edx
31286 jb 2f /* less than 8 bytes, go to byte copy loop */
31287@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
31288 movsb
31289 xorl %eax,%eax
31290 ASM_CLAC
31291+ ASM_PAX_CLOSE_USERLAND
31292+ pax_force_retaddr
31293 ret
31294
31295 .section .fixup,"ax"
31296@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
31297 */
31298 ENTRY(copy_user_enhanced_fast_string)
31299 CFI_STARTPROC
31300+ ASM_PAX_OPEN_USERLAND
31301 ASM_STAC
31302 movl %edx,%ecx
31303 1: rep
31304 movsb
31305 xorl %eax,%eax
31306 ASM_CLAC
31307+ ASM_PAX_CLOSE_USERLAND
31308+ pax_force_retaddr
31309 ret
31310
31311 .section .fixup,"ax"
31312diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
31313index 6a4f43c..c70fb52 100644
31314--- a/arch/x86/lib/copy_user_nocache_64.S
31315+++ b/arch/x86/lib/copy_user_nocache_64.S
31316@@ -8,6 +8,7 @@
31317
31318 #include <linux/linkage.h>
31319 #include <asm/dwarf2.h>
31320+#include <asm/alternative-asm.h>
31321
31322 #define FIX_ALIGNMENT 1
31323
31324@@ -16,6 +17,7 @@
31325 #include <asm/thread_info.h>
31326 #include <asm/asm.h>
31327 #include <asm/smap.h>
31328+#include <asm/pgtable.h>
31329
31330 .macro ALIGN_DESTINATION
31331 #ifdef FIX_ALIGNMENT
31332@@ -49,6 +51,16 @@
31333 */
31334 ENTRY(__copy_user_nocache)
31335 CFI_STARTPROC
31336+
31337+#ifdef CONFIG_PAX_MEMORY_UDEREF
31338+ mov pax_user_shadow_base,%rcx
31339+ cmp %rcx,%rsi
31340+ jae 1f
31341+ add %rcx,%rsi
31342+1:
31343+#endif
31344+
31345+ ASM_PAX_OPEN_USERLAND
31346 ASM_STAC
31347 cmpl $8,%edx
31348 jb 20f /* less then 8 bytes, go to byte copy loop */
31349@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
31350 jnz 21b
31351 23: xorl %eax,%eax
31352 ASM_CLAC
31353+ ASM_PAX_CLOSE_USERLAND
31354 sfence
31355+ pax_force_retaddr
31356 ret
31357
31358 .section .fixup,"ax"
31359diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
31360index 2419d5f..fe52d0e 100644
31361--- a/arch/x86/lib/csum-copy_64.S
31362+++ b/arch/x86/lib/csum-copy_64.S
31363@@ -9,6 +9,7 @@
31364 #include <asm/dwarf2.h>
31365 #include <asm/errno.h>
31366 #include <asm/asm.h>
31367+#include <asm/alternative-asm.h>
31368
31369 /*
31370 * Checksum copy with exception handling.
31371@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
31372 CFI_ADJUST_CFA_OFFSET 7*8
31373 movq %rbx, 2*8(%rsp)
31374 CFI_REL_OFFSET rbx, 2*8
31375- movq %r12, 3*8(%rsp)
31376- CFI_REL_OFFSET r12, 3*8
31377+ movq %r15, 3*8(%rsp)
31378+ CFI_REL_OFFSET r15, 3*8
31379 movq %r14, 4*8(%rsp)
31380 CFI_REL_OFFSET r14, 4*8
31381 movq %r13, 5*8(%rsp)
31382@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
31383 movl %edx, %ecx
31384
31385 xorl %r9d, %r9d
31386- movq %rcx, %r12
31387+ movq %rcx, %r15
31388
31389- shrq $6, %r12
31390+ shrq $6, %r15
31391 jz .Lhandle_tail /* < 64 */
31392
31393 clc
31394
31395 /* main loop. clear in 64 byte blocks */
31396 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
31397- /* r11: temp3, rdx: temp4, r12 loopcnt */
31398+ /* r11: temp3, rdx: temp4, r15 loopcnt */
31399 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
31400 .p2align 4
31401 .Lloop:
31402@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
31403 adcq %r14, %rax
31404 adcq %r13, %rax
31405
31406- decl %r12d
31407+ decl %r15d
31408
31409 dest
31410 movq %rbx, (%rsi)
31411@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
31412 .Lende:
31413 movq 2*8(%rsp), %rbx
31414 CFI_RESTORE rbx
31415- movq 3*8(%rsp), %r12
31416- CFI_RESTORE r12
31417+ movq 3*8(%rsp), %r15
31418+ CFI_RESTORE r15
31419 movq 4*8(%rsp), %r14
31420 CFI_RESTORE r14
31421 movq 5*8(%rsp), %r13
31422@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
31423 CFI_RESTORE rbp
31424 addq $7*8, %rsp
31425 CFI_ADJUST_CFA_OFFSET -7*8
31426+ pax_force_retaddr
31427 ret
31428 CFI_RESTORE_STATE
31429
31430diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
31431index 7609e0e..b449b98 100644
31432--- a/arch/x86/lib/csum-wrappers_64.c
31433+++ b/arch/x86/lib/csum-wrappers_64.c
31434@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
31435 len -= 2;
31436 }
31437 }
31438+ pax_open_userland();
31439 stac();
31440- isum = csum_partial_copy_generic((__force const void *)src,
31441+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
31442 dst, len, isum, errp, NULL);
31443 clac();
31444+ pax_close_userland();
31445 if (unlikely(*errp))
31446 goto out_err;
31447
31448@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
31449 }
31450
31451 *errp = 0;
31452+ pax_open_userland();
31453 stac();
31454- ret = csum_partial_copy_generic(src, (void __force *)dst,
31455+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
31456 len, isum, NULL, errp);
31457 clac();
31458+ pax_close_userland();
31459 return ret;
31460 }
31461 EXPORT_SYMBOL(csum_partial_copy_to_user);
31462diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
31463index a451235..1daa956 100644
31464--- a/arch/x86/lib/getuser.S
31465+++ b/arch/x86/lib/getuser.S
31466@@ -33,17 +33,40 @@
31467 #include <asm/thread_info.h>
31468 #include <asm/asm.h>
31469 #include <asm/smap.h>
31470+#include <asm/segment.h>
31471+#include <asm/pgtable.h>
31472+#include <asm/alternative-asm.h>
31473+
31474+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
31475+#define __copyuser_seg gs;
31476+#else
31477+#define __copyuser_seg
31478+#endif
31479
31480 .text
31481 ENTRY(__get_user_1)
31482 CFI_STARTPROC
31483+
31484+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31485 GET_THREAD_INFO(%_ASM_DX)
31486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31487 jae bad_get_user
31488 ASM_STAC
31489-1: movzbl (%_ASM_AX),%edx
31490+
31491+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31492+ mov pax_user_shadow_base,%_ASM_DX
31493+ cmp %_ASM_DX,%_ASM_AX
31494+ jae 1234f
31495+ add %_ASM_DX,%_ASM_AX
31496+1234:
31497+#endif
31498+
31499+#endif
31500+
31501+1: __copyuser_seg movzbl (%_ASM_AX),%edx
31502 xor %eax,%eax
31503 ASM_CLAC
31504+ pax_force_retaddr
31505 ret
31506 CFI_ENDPROC
31507 ENDPROC(__get_user_1)
31508@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
31509 ENTRY(__get_user_2)
31510 CFI_STARTPROC
31511 add $1,%_ASM_AX
31512+
31513+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31514 jc bad_get_user
31515 GET_THREAD_INFO(%_ASM_DX)
31516 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31517 jae bad_get_user
31518 ASM_STAC
31519-2: movzwl -1(%_ASM_AX),%edx
31520+
31521+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31522+ mov pax_user_shadow_base,%_ASM_DX
31523+ cmp %_ASM_DX,%_ASM_AX
31524+ jae 1234f
31525+ add %_ASM_DX,%_ASM_AX
31526+1234:
31527+#endif
31528+
31529+#endif
31530+
31531+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
31532 xor %eax,%eax
31533 ASM_CLAC
31534+ pax_force_retaddr
31535 ret
31536 CFI_ENDPROC
31537 ENDPROC(__get_user_2)
31538@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
31539 ENTRY(__get_user_4)
31540 CFI_STARTPROC
31541 add $3,%_ASM_AX
31542+
31543+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31544 jc bad_get_user
31545 GET_THREAD_INFO(%_ASM_DX)
31546 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31547 jae bad_get_user
31548 ASM_STAC
31549-3: movl -3(%_ASM_AX),%edx
31550+
31551+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31552+ mov pax_user_shadow_base,%_ASM_DX
31553+ cmp %_ASM_DX,%_ASM_AX
31554+ jae 1234f
31555+ add %_ASM_DX,%_ASM_AX
31556+1234:
31557+#endif
31558+
31559+#endif
31560+
31561+3: __copyuser_seg movl -3(%_ASM_AX),%edx
31562 xor %eax,%eax
31563 ASM_CLAC
31564+ pax_force_retaddr
31565 ret
31566 CFI_ENDPROC
31567 ENDPROC(__get_user_4)
31568@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
31569 GET_THREAD_INFO(%_ASM_DX)
31570 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31571 jae bad_get_user
31572+
31573+#ifdef CONFIG_PAX_MEMORY_UDEREF
31574+ mov pax_user_shadow_base,%_ASM_DX
31575+ cmp %_ASM_DX,%_ASM_AX
31576+ jae 1234f
31577+ add %_ASM_DX,%_ASM_AX
31578+1234:
31579+#endif
31580+
31581 ASM_STAC
31582 4: movq -7(%_ASM_AX),%rdx
31583 xor %eax,%eax
31584 ASM_CLAC
31585+ pax_force_retaddr
31586 ret
31587 #else
31588 add $7,%_ASM_AX
31589@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
31590 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
31591 jae bad_get_user_8
31592 ASM_STAC
31593-4: movl -7(%_ASM_AX),%edx
31594-5: movl -3(%_ASM_AX),%ecx
31595+4: __copyuser_seg movl -7(%_ASM_AX),%edx
31596+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
31597 xor %eax,%eax
31598 ASM_CLAC
31599+ pax_force_retaddr
31600 ret
31601 #endif
31602 CFI_ENDPROC
31603@@ -113,6 +175,7 @@ bad_get_user:
31604 xor %edx,%edx
31605 mov $(-EFAULT),%_ASM_AX
31606 ASM_CLAC
31607+ pax_force_retaddr
31608 ret
31609 CFI_ENDPROC
31610 END(bad_get_user)
31611@@ -124,6 +187,7 @@ bad_get_user_8:
31612 xor %ecx,%ecx
31613 mov $(-EFAULT),%_ASM_AX
31614 ASM_CLAC
31615+ pax_force_retaddr
31616 ret
31617 CFI_ENDPROC
31618 END(bad_get_user_8)
31619diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
31620index 54fcffe..7be149e 100644
31621--- a/arch/x86/lib/insn.c
31622+++ b/arch/x86/lib/insn.c
31623@@ -20,8 +20,10 @@
31624
31625 #ifdef __KERNEL__
31626 #include <linux/string.h>
31627+#include <asm/pgtable_types.h>
31628 #else
31629 #include <string.h>
31630+#define ktla_ktva(addr) addr
31631 #endif
31632 #include <asm/inat.h>
31633 #include <asm/insn.h>
31634@@ -53,8 +55,8 @@
31635 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
31636 {
31637 memset(insn, 0, sizeof(*insn));
31638- insn->kaddr = kaddr;
31639- insn->next_byte = kaddr;
31640+ insn->kaddr = ktla_ktva(kaddr);
31641+ insn->next_byte = ktla_ktva(kaddr);
31642 insn->x86_64 = x86_64 ? 1 : 0;
31643 insn->opnd_bytes = 4;
31644 if (x86_64)
31645diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
31646index 05a95e7..326f2fa 100644
31647--- a/arch/x86/lib/iomap_copy_64.S
31648+++ b/arch/x86/lib/iomap_copy_64.S
31649@@ -17,6 +17,7 @@
31650
31651 #include <linux/linkage.h>
31652 #include <asm/dwarf2.h>
31653+#include <asm/alternative-asm.h>
31654
31655 /*
31656 * override generic version in lib/iomap_copy.c
31657@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
31658 CFI_STARTPROC
31659 movl %edx,%ecx
31660 rep movsd
31661+ pax_force_retaddr
31662 ret
31663 CFI_ENDPROC
31664 ENDPROC(__iowrite32_copy)
31665diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
31666index 56313a3..0db417e 100644
31667--- a/arch/x86/lib/memcpy_64.S
31668+++ b/arch/x86/lib/memcpy_64.S
31669@@ -24,7 +24,7 @@
31670 * This gets patched over the unrolled variant (below) via the
31671 * alternative instructions framework:
31672 */
31673- .section .altinstr_replacement, "ax", @progbits
31674+ .section .altinstr_replacement, "a", @progbits
31675 .Lmemcpy_c:
31676 movq %rdi, %rax
31677 movq %rdx, %rcx
31678@@ -33,6 +33,7 @@
31679 rep movsq
31680 movl %edx, %ecx
31681 rep movsb
31682+ pax_force_retaddr
31683 ret
31684 .Lmemcpy_e:
31685 .previous
31686@@ -44,11 +45,12 @@
31687 * This gets patched over the unrolled variant (below) via the
31688 * alternative instructions framework:
31689 */
31690- .section .altinstr_replacement, "ax", @progbits
31691+ .section .altinstr_replacement, "a", @progbits
31692 .Lmemcpy_c_e:
31693 movq %rdi, %rax
31694 movq %rdx, %rcx
31695 rep movsb
31696+ pax_force_retaddr
31697 ret
31698 .Lmemcpy_e_e:
31699 .previous
31700@@ -136,6 +138,7 @@ ENTRY(memcpy)
31701 movq %r9, 1*8(%rdi)
31702 movq %r10, -2*8(%rdi, %rdx)
31703 movq %r11, -1*8(%rdi, %rdx)
31704+ pax_force_retaddr
31705 retq
31706 .p2align 4
31707 .Lless_16bytes:
31708@@ -148,6 +151,7 @@ ENTRY(memcpy)
31709 movq -1*8(%rsi, %rdx), %r9
31710 movq %r8, 0*8(%rdi)
31711 movq %r9, -1*8(%rdi, %rdx)
31712+ pax_force_retaddr
31713 retq
31714 .p2align 4
31715 .Lless_8bytes:
31716@@ -161,6 +165,7 @@ ENTRY(memcpy)
31717 movl -4(%rsi, %rdx), %r8d
31718 movl %ecx, (%rdi)
31719 movl %r8d, -4(%rdi, %rdx)
31720+ pax_force_retaddr
31721 retq
31722 .p2align 4
31723 .Lless_3bytes:
31724@@ -179,6 +184,7 @@ ENTRY(memcpy)
31725 movb %cl, (%rdi)
31726
31727 .Lend:
31728+ pax_force_retaddr
31729 retq
31730 CFI_ENDPROC
31731 ENDPROC(memcpy)
31732diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
31733index 65268a6..dd1de11 100644
31734--- a/arch/x86/lib/memmove_64.S
31735+++ b/arch/x86/lib/memmove_64.S
31736@@ -202,14 +202,16 @@ ENTRY(memmove)
31737 movb (%rsi), %r11b
31738 movb %r11b, (%rdi)
31739 13:
31740+ pax_force_retaddr
31741 retq
31742 CFI_ENDPROC
31743
31744- .section .altinstr_replacement,"ax"
31745+ .section .altinstr_replacement,"a"
31746 .Lmemmove_begin_forward_efs:
31747 /* Forward moving data. */
31748 movq %rdx, %rcx
31749 rep movsb
31750+ pax_force_retaddr
31751 retq
31752 .Lmemmove_end_forward_efs:
31753 .previous
31754diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
31755index 2dcb380..2eb79fe 100644
31756--- a/arch/x86/lib/memset_64.S
31757+++ b/arch/x86/lib/memset_64.S
31758@@ -16,7 +16,7 @@
31759 *
31760 * rax original destination
31761 */
31762- .section .altinstr_replacement, "ax", @progbits
31763+ .section .altinstr_replacement, "a", @progbits
31764 .Lmemset_c:
31765 movq %rdi,%r9
31766 movq %rdx,%rcx
31767@@ -30,6 +30,7 @@
31768 movl %edx,%ecx
31769 rep stosb
31770 movq %r9,%rax
31771+ pax_force_retaddr
31772 ret
31773 .Lmemset_e:
31774 .previous
31775@@ -45,13 +46,14 @@
31776 *
31777 * rax original destination
31778 */
31779- .section .altinstr_replacement, "ax", @progbits
31780+ .section .altinstr_replacement, "a", @progbits
31781 .Lmemset_c_e:
31782 movq %rdi,%r9
31783 movb %sil,%al
31784 movq %rdx,%rcx
31785 rep stosb
31786 movq %r9,%rax
31787+ pax_force_retaddr
31788 ret
31789 .Lmemset_e_e:
31790 .previous
31791@@ -118,6 +120,7 @@ ENTRY(__memset)
31792
31793 .Lende:
31794 movq %r10,%rax
31795+ pax_force_retaddr
31796 ret
31797
31798 CFI_RESTORE_STATE
31799diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
31800index c9f2d9b..e7fd2c0 100644
31801--- a/arch/x86/lib/mmx_32.c
31802+++ b/arch/x86/lib/mmx_32.c
31803@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31804 {
31805 void *p;
31806 int i;
31807+ unsigned long cr0;
31808
31809 if (unlikely(in_interrupt()))
31810 return __memcpy(to, from, len);
31811@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31812 kernel_fpu_begin();
31813
31814 __asm__ __volatile__ (
31815- "1: prefetch (%0)\n" /* This set is 28 bytes */
31816- " prefetch 64(%0)\n"
31817- " prefetch 128(%0)\n"
31818- " prefetch 192(%0)\n"
31819- " prefetch 256(%0)\n"
31820+ "1: prefetch (%1)\n" /* This set is 28 bytes */
31821+ " prefetch 64(%1)\n"
31822+ " prefetch 128(%1)\n"
31823+ " prefetch 192(%1)\n"
31824+ " prefetch 256(%1)\n"
31825 "2: \n"
31826 ".section .fixup, \"ax\"\n"
31827- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31828+ "3: \n"
31829+
31830+#ifdef CONFIG_PAX_KERNEXEC
31831+ " movl %%cr0, %0\n"
31832+ " movl %0, %%eax\n"
31833+ " andl $0xFFFEFFFF, %%eax\n"
31834+ " movl %%eax, %%cr0\n"
31835+#endif
31836+
31837+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31838+
31839+#ifdef CONFIG_PAX_KERNEXEC
31840+ " movl %0, %%cr0\n"
31841+#endif
31842+
31843 " jmp 2b\n"
31844 ".previous\n"
31845 _ASM_EXTABLE(1b, 3b)
31846- : : "r" (from));
31847+ : "=&r" (cr0) : "r" (from) : "ax");
31848
31849 for ( ; i > 5; i--) {
31850 __asm__ __volatile__ (
31851- "1: prefetch 320(%0)\n"
31852- "2: movq (%0), %%mm0\n"
31853- " movq 8(%0), %%mm1\n"
31854- " movq 16(%0), %%mm2\n"
31855- " movq 24(%0), %%mm3\n"
31856- " movq %%mm0, (%1)\n"
31857- " movq %%mm1, 8(%1)\n"
31858- " movq %%mm2, 16(%1)\n"
31859- " movq %%mm3, 24(%1)\n"
31860- " movq 32(%0), %%mm0\n"
31861- " movq 40(%0), %%mm1\n"
31862- " movq 48(%0), %%mm2\n"
31863- " movq 56(%0), %%mm3\n"
31864- " movq %%mm0, 32(%1)\n"
31865- " movq %%mm1, 40(%1)\n"
31866- " movq %%mm2, 48(%1)\n"
31867- " movq %%mm3, 56(%1)\n"
31868+ "1: prefetch 320(%1)\n"
31869+ "2: movq (%1), %%mm0\n"
31870+ " movq 8(%1), %%mm1\n"
31871+ " movq 16(%1), %%mm2\n"
31872+ " movq 24(%1), %%mm3\n"
31873+ " movq %%mm0, (%2)\n"
31874+ " movq %%mm1, 8(%2)\n"
31875+ " movq %%mm2, 16(%2)\n"
31876+ " movq %%mm3, 24(%2)\n"
31877+ " movq 32(%1), %%mm0\n"
31878+ " movq 40(%1), %%mm1\n"
31879+ " movq 48(%1), %%mm2\n"
31880+ " movq 56(%1), %%mm3\n"
31881+ " movq %%mm0, 32(%2)\n"
31882+ " movq %%mm1, 40(%2)\n"
31883+ " movq %%mm2, 48(%2)\n"
31884+ " movq %%mm3, 56(%2)\n"
31885 ".section .fixup, \"ax\"\n"
31886- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31887+ "3:\n"
31888+
31889+#ifdef CONFIG_PAX_KERNEXEC
31890+ " movl %%cr0, %0\n"
31891+ " movl %0, %%eax\n"
31892+ " andl $0xFFFEFFFF, %%eax\n"
31893+ " movl %%eax, %%cr0\n"
31894+#endif
31895+
31896+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31897+
31898+#ifdef CONFIG_PAX_KERNEXEC
31899+ " movl %0, %%cr0\n"
31900+#endif
31901+
31902 " jmp 2b\n"
31903 ".previous\n"
31904 _ASM_EXTABLE(1b, 3b)
31905- : : "r" (from), "r" (to) : "memory");
31906+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31907
31908 from += 64;
31909 to += 64;
31910@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
31911 static void fast_copy_page(void *to, void *from)
31912 {
31913 int i;
31914+ unsigned long cr0;
31915
31916 kernel_fpu_begin();
31917
31918@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
31919 * but that is for later. -AV
31920 */
31921 __asm__ __volatile__(
31922- "1: prefetch (%0)\n"
31923- " prefetch 64(%0)\n"
31924- " prefetch 128(%0)\n"
31925- " prefetch 192(%0)\n"
31926- " prefetch 256(%0)\n"
31927+ "1: prefetch (%1)\n"
31928+ " prefetch 64(%1)\n"
31929+ " prefetch 128(%1)\n"
31930+ " prefetch 192(%1)\n"
31931+ " prefetch 256(%1)\n"
31932 "2: \n"
31933 ".section .fixup, \"ax\"\n"
31934- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31935+ "3: \n"
31936+
31937+#ifdef CONFIG_PAX_KERNEXEC
31938+ " movl %%cr0, %0\n"
31939+ " movl %0, %%eax\n"
31940+ " andl $0xFFFEFFFF, %%eax\n"
31941+ " movl %%eax, %%cr0\n"
31942+#endif
31943+
31944+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31945+
31946+#ifdef CONFIG_PAX_KERNEXEC
31947+ " movl %0, %%cr0\n"
31948+#endif
31949+
31950 " jmp 2b\n"
31951 ".previous\n"
31952- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31953+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31954
31955 for (i = 0; i < (4096-320)/64; i++) {
31956 __asm__ __volatile__ (
31957- "1: prefetch 320(%0)\n"
31958- "2: movq (%0), %%mm0\n"
31959- " movntq %%mm0, (%1)\n"
31960- " movq 8(%0), %%mm1\n"
31961- " movntq %%mm1, 8(%1)\n"
31962- " movq 16(%0), %%mm2\n"
31963- " movntq %%mm2, 16(%1)\n"
31964- " movq 24(%0), %%mm3\n"
31965- " movntq %%mm3, 24(%1)\n"
31966- " movq 32(%0), %%mm4\n"
31967- " movntq %%mm4, 32(%1)\n"
31968- " movq 40(%0), %%mm5\n"
31969- " movntq %%mm5, 40(%1)\n"
31970- " movq 48(%0), %%mm6\n"
31971- " movntq %%mm6, 48(%1)\n"
31972- " movq 56(%0), %%mm7\n"
31973- " movntq %%mm7, 56(%1)\n"
31974+ "1: prefetch 320(%1)\n"
31975+ "2: movq (%1), %%mm0\n"
31976+ " movntq %%mm0, (%2)\n"
31977+ " movq 8(%1), %%mm1\n"
31978+ " movntq %%mm1, 8(%2)\n"
31979+ " movq 16(%1), %%mm2\n"
31980+ " movntq %%mm2, 16(%2)\n"
31981+ " movq 24(%1), %%mm3\n"
31982+ " movntq %%mm3, 24(%2)\n"
31983+ " movq 32(%1), %%mm4\n"
31984+ " movntq %%mm4, 32(%2)\n"
31985+ " movq 40(%1), %%mm5\n"
31986+ " movntq %%mm5, 40(%2)\n"
31987+ " movq 48(%1), %%mm6\n"
31988+ " movntq %%mm6, 48(%2)\n"
31989+ " movq 56(%1), %%mm7\n"
31990+ " movntq %%mm7, 56(%2)\n"
31991 ".section .fixup, \"ax\"\n"
31992- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31993+ "3:\n"
31994+
31995+#ifdef CONFIG_PAX_KERNEXEC
31996+ " movl %%cr0, %0\n"
31997+ " movl %0, %%eax\n"
31998+ " andl $0xFFFEFFFF, %%eax\n"
31999+ " movl %%eax, %%cr0\n"
32000+#endif
32001+
32002+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32003+
32004+#ifdef CONFIG_PAX_KERNEXEC
32005+ " movl %0, %%cr0\n"
32006+#endif
32007+
32008 " jmp 2b\n"
32009 ".previous\n"
32010- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
32011+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
32012
32013 from += 64;
32014 to += 64;
32015@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
32016 static void fast_copy_page(void *to, void *from)
32017 {
32018 int i;
32019+ unsigned long cr0;
32020
32021 kernel_fpu_begin();
32022
32023 __asm__ __volatile__ (
32024- "1: prefetch (%0)\n"
32025- " prefetch 64(%0)\n"
32026- " prefetch 128(%0)\n"
32027- " prefetch 192(%0)\n"
32028- " prefetch 256(%0)\n"
32029+ "1: prefetch (%1)\n"
32030+ " prefetch 64(%1)\n"
32031+ " prefetch 128(%1)\n"
32032+ " prefetch 192(%1)\n"
32033+ " prefetch 256(%1)\n"
32034 "2: \n"
32035 ".section .fixup, \"ax\"\n"
32036- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
32037+ "3: \n"
32038+
32039+#ifdef CONFIG_PAX_KERNEXEC
32040+ " movl %%cr0, %0\n"
32041+ " movl %0, %%eax\n"
32042+ " andl $0xFFFEFFFF, %%eax\n"
32043+ " movl %%eax, %%cr0\n"
32044+#endif
32045+
32046+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
32047+
32048+#ifdef CONFIG_PAX_KERNEXEC
32049+ " movl %0, %%cr0\n"
32050+#endif
32051+
32052 " jmp 2b\n"
32053 ".previous\n"
32054- _ASM_EXTABLE(1b, 3b) : : "r" (from));
32055+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
32056
32057 for (i = 0; i < 4096/64; i++) {
32058 __asm__ __volatile__ (
32059- "1: prefetch 320(%0)\n"
32060- "2: movq (%0), %%mm0\n"
32061- " movq 8(%0), %%mm1\n"
32062- " movq 16(%0), %%mm2\n"
32063- " movq 24(%0), %%mm3\n"
32064- " movq %%mm0, (%1)\n"
32065- " movq %%mm1, 8(%1)\n"
32066- " movq %%mm2, 16(%1)\n"
32067- " movq %%mm3, 24(%1)\n"
32068- " movq 32(%0), %%mm0\n"
32069- " movq 40(%0), %%mm1\n"
32070- " movq 48(%0), %%mm2\n"
32071- " movq 56(%0), %%mm3\n"
32072- " movq %%mm0, 32(%1)\n"
32073- " movq %%mm1, 40(%1)\n"
32074- " movq %%mm2, 48(%1)\n"
32075- " movq %%mm3, 56(%1)\n"
32076+ "1: prefetch 320(%1)\n"
32077+ "2: movq (%1), %%mm0\n"
32078+ " movq 8(%1), %%mm1\n"
32079+ " movq 16(%1), %%mm2\n"
32080+ " movq 24(%1), %%mm3\n"
32081+ " movq %%mm0, (%2)\n"
32082+ " movq %%mm1, 8(%2)\n"
32083+ " movq %%mm2, 16(%2)\n"
32084+ " movq %%mm3, 24(%2)\n"
32085+ " movq 32(%1), %%mm0\n"
32086+ " movq 40(%1), %%mm1\n"
32087+ " movq 48(%1), %%mm2\n"
32088+ " movq 56(%1), %%mm3\n"
32089+ " movq %%mm0, 32(%2)\n"
32090+ " movq %%mm1, 40(%2)\n"
32091+ " movq %%mm2, 48(%2)\n"
32092+ " movq %%mm3, 56(%2)\n"
32093 ".section .fixup, \"ax\"\n"
32094- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32095+ "3:\n"
32096+
32097+#ifdef CONFIG_PAX_KERNEXEC
32098+ " movl %%cr0, %0\n"
32099+ " movl %0, %%eax\n"
32100+ " andl $0xFFFEFFFF, %%eax\n"
32101+ " movl %%eax, %%cr0\n"
32102+#endif
32103+
32104+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
32105+
32106+#ifdef CONFIG_PAX_KERNEXEC
32107+ " movl %0, %%cr0\n"
32108+#endif
32109+
32110 " jmp 2b\n"
32111 ".previous\n"
32112 _ASM_EXTABLE(1b, 3b)
32113- : : "r" (from), "r" (to) : "memory");
32114+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
32115
32116 from += 64;
32117 to += 64;
32118diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
32119index f6d13ee..d789440 100644
32120--- a/arch/x86/lib/msr-reg.S
32121+++ b/arch/x86/lib/msr-reg.S
32122@@ -3,6 +3,7 @@
32123 #include <asm/dwarf2.h>
32124 #include <asm/asm.h>
32125 #include <asm/msr.h>
32126+#include <asm/alternative-asm.h>
32127
32128 #ifdef CONFIG_X86_64
32129 /*
32130@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
32131 movl %edi, 28(%r10)
32132 popq_cfi %rbp
32133 popq_cfi %rbx
32134+ pax_force_retaddr
32135 ret
32136 3:
32137 CFI_RESTORE_STATE
32138diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
32139index fc6ba17..d4d989d 100644
32140--- a/arch/x86/lib/putuser.S
32141+++ b/arch/x86/lib/putuser.S
32142@@ -16,7 +16,9 @@
32143 #include <asm/errno.h>
32144 #include <asm/asm.h>
32145 #include <asm/smap.h>
32146-
32147+#include <asm/segment.h>
32148+#include <asm/pgtable.h>
32149+#include <asm/alternative-asm.h>
32150
32151 /*
32152 * __put_user_X
32153@@ -30,57 +32,125 @@
32154 * as they get called from within inline assembly.
32155 */
32156
32157-#define ENTER CFI_STARTPROC ; \
32158- GET_THREAD_INFO(%_ASM_BX)
32159-#define EXIT ASM_CLAC ; \
32160- ret ; \
32161+#define ENTER CFI_STARTPROC
32162+#define EXIT ASM_CLAC ; \
32163+ pax_force_retaddr ; \
32164+ ret ; \
32165 CFI_ENDPROC
32166
32167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32168+#define _DEST %_ASM_CX,%_ASM_BX
32169+#else
32170+#define _DEST %_ASM_CX
32171+#endif
32172+
32173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
32174+#define __copyuser_seg gs;
32175+#else
32176+#define __copyuser_seg
32177+#endif
32178+
32179 .text
32180 ENTRY(__put_user_1)
32181 ENTER
32182+
32183+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32184+ GET_THREAD_INFO(%_ASM_BX)
32185 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
32186 jae bad_put_user
32187 ASM_STAC
32188-1: movb %al,(%_ASM_CX)
32189+
32190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32191+ mov pax_user_shadow_base,%_ASM_BX
32192+ cmp %_ASM_BX,%_ASM_CX
32193+ jb 1234f
32194+ xor %ebx,%ebx
32195+1234:
32196+#endif
32197+
32198+#endif
32199+
32200+1: __copyuser_seg movb %al,(_DEST)
32201 xor %eax,%eax
32202 EXIT
32203 ENDPROC(__put_user_1)
32204
32205 ENTRY(__put_user_2)
32206 ENTER
32207+
32208+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32209+ GET_THREAD_INFO(%_ASM_BX)
32210 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32211 sub $1,%_ASM_BX
32212 cmp %_ASM_BX,%_ASM_CX
32213 jae bad_put_user
32214 ASM_STAC
32215-2: movw %ax,(%_ASM_CX)
32216+
32217+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32218+ mov pax_user_shadow_base,%_ASM_BX
32219+ cmp %_ASM_BX,%_ASM_CX
32220+ jb 1234f
32221+ xor %ebx,%ebx
32222+1234:
32223+#endif
32224+
32225+#endif
32226+
32227+2: __copyuser_seg movw %ax,(_DEST)
32228 xor %eax,%eax
32229 EXIT
32230 ENDPROC(__put_user_2)
32231
32232 ENTRY(__put_user_4)
32233 ENTER
32234+
32235+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32236+ GET_THREAD_INFO(%_ASM_BX)
32237 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32238 sub $3,%_ASM_BX
32239 cmp %_ASM_BX,%_ASM_CX
32240 jae bad_put_user
32241 ASM_STAC
32242-3: movl %eax,(%_ASM_CX)
32243+
32244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32245+ mov pax_user_shadow_base,%_ASM_BX
32246+ cmp %_ASM_BX,%_ASM_CX
32247+ jb 1234f
32248+ xor %ebx,%ebx
32249+1234:
32250+#endif
32251+
32252+#endif
32253+
32254+3: __copyuser_seg movl %eax,(_DEST)
32255 xor %eax,%eax
32256 EXIT
32257 ENDPROC(__put_user_4)
32258
32259 ENTRY(__put_user_8)
32260 ENTER
32261+
32262+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
32263+ GET_THREAD_INFO(%_ASM_BX)
32264 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
32265 sub $7,%_ASM_BX
32266 cmp %_ASM_BX,%_ASM_CX
32267 jae bad_put_user
32268 ASM_STAC
32269-4: mov %_ASM_AX,(%_ASM_CX)
32270+
32271+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32272+ mov pax_user_shadow_base,%_ASM_BX
32273+ cmp %_ASM_BX,%_ASM_CX
32274+ jb 1234f
32275+ xor %ebx,%ebx
32276+1234:
32277+#endif
32278+
32279+#endif
32280+
32281+4: __copyuser_seg mov %_ASM_AX,(_DEST)
32282 #ifdef CONFIG_X86_32
32283-5: movl %edx,4(%_ASM_CX)
32284+5: __copyuser_seg movl %edx,4(_DEST)
32285 #endif
32286 xor %eax,%eax
32287 EXIT
32288diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
32289index 1cad221..de671ee 100644
32290--- a/arch/x86/lib/rwlock.S
32291+++ b/arch/x86/lib/rwlock.S
32292@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
32293 FRAME
32294 0: LOCK_PREFIX
32295 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
32296+
32297+#ifdef CONFIG_PAX_REFCOUNT
32298+ jno 1234f
32299+ LOCK_PREFIX
32300+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
32301+ int $4
32302+1234:
32303+ _ASM_EXTABLE(1234b, 1234b)
32304+#endif
32305+
32306 1: rep; nop
32307 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
32308 jne 1b
32309 LOCK_PREFIX
32310 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
32311+
32312+#ifdef CONFIG_PAX_REFCOUNT
32313+ jno 1234f
32314+ LOCK_PREFIX
32315+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
32316+ int $4
32317+1234:
32318+ _ASM_EXTABLE(1234b, 1234b)
32319+#endif
32320+
32321 jnz 0b
32322 ENDFRAME
32323+ pax_force_retaddr
32324 ret
32325 CFI_ENDPROC
32326 END(__write_lock_failed)
32327@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
32328 FRAME
32329 0: LOCK_PREFIX
32330 READ_LOCK_SIZE(inc) (%__lock_ptr)
32331+
32332+#ifdef CONFIG_PAX_REFCOUNT
32333+ jno 1234f
32334+ LOCK_PREFIX
32335+ READ_LOCK_SIZE(dec) (%__lock_ptr)
32336+ int $4
32337+1234:
32338+ _ASM_EXTABLE(1234b, 1234b)
32339+#endif
32340+
32341 1: rep; nop
32342 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
32343 js 1b
32344 LOCK_PREFIX
32345 READ_LOCK_SIZE(dec) (%__lock_ptr)
32346+
32347+#ifdef CONFIG_PAX_REFCOUNT
32348+ jno 1234f
32349+ LOCK_PREFIX
32350+ READ_LOCK_SIZE(inc) (%__lock_ptr)
32351+ int $4
32352+1234:
32353+ _ASM_EXTABLE(1234b, 1234b)
32354+#endif
32355+
32356 js 0b
32357 ENDFRAME
32358+ pax_force_retaddr
32359 ret
32360 CFI_ENDPROC
32361 END(__read_lock_failed)
32362diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
32363index 5dff5f0..cadebf4 100644
32364--- a/arch/x86/lib/rwsem.S
32365+++ b/arch/x86/lib/rwsem.S
32366@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
32367 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
32368 CFI_RESTORE __ASM_REG(dx)
32369 restore_common_regs
32370+ pax_force_retaddr
32371 ret
32372 CFI_ENDPROC
32373 ENDPROC(call_rwsem_down_read_failed)
32374@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
32375 movq %rax,%rdi
32376 call rwsem_down_write_failed
32377 restore_common_regs
32378+ pax_force_retaddr
32379 ret
32380 CFI_ENDPROC
32381 ENDPROC(call_rwsem_down_write_failed)
32382@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
32383 movq %rax,%rdi
32384 call rwsem_wake
32385 restore_common_regs
32386-1: ret
32387+1: pax_force_retaddr
32388+ ret
32389 CFI_ENDPROC
32390 ENDPROC(call_rwsem_wake)
32391
32392@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
32393 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
32394 CFI_RESTORE __ASM_REG(dx)
32395 restore_common_regs
32396+ pax_force_retaddr
32397 ret
32398 CFI_ENDPROC
32399 ENDPROC(call_rwsem_downgrade_wake)
32400diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
32401index 92d9fea..b2762c8 100644
32402--- a/arch/x86/lib/thunk_64.S
32403+++ b/arch/x86/lib/thunk_64.S
32404@@ -9,6 +9,7 @@
32405 #include <asm/dwarf2.h>
32406 #include <asm/calling.h>
32407 #include <asm/asm.h>
32408+#include <asm/alternative-asm.h>
32409
32410 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
32411 .macro THUNK name, func, put_ret_addr_in_rdi=0
32412@@ -16,11 +17,11 @@
32413 \name:
32414 CFI_STARTPROC
32415
32416- /* this one pushes 9 elems, the next one would be %rIP */
32417- SAVE_ARGS
32418+ /* this one pushes 15+1 elems, the next one would be %rIP */
32419+ SAVE_ARGS 8
32420
32421 .if \put_ret_addr_in_rdi
32422- movq_cfi_restore 9*8, rdi
32423+ movq_cfi_restore RIP, rdi
32424 .endif
32425
32426 call \func
32427@@ -40,9 +41,10 @@
32428
32429 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
32430 CFI_STARTPROC
32431- SAVE_ARGS
32432+ SAVE_ARGS 8
32433 restore:
32434- RESTORE_ARGS
32435+ RESTORE_ARGS 1,8
32436+ pax_force_retaddr
32437 ret
32438 CFI_ENDPROC
32439 _ASM_NOKPROBE(restore)
32440diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
32441index e2f5e21..4b22130 100644
32442--- a/arch/x86/lib/usercopy_32.c
32443+++ b/arch/x86/lib/usercopy_32.c
32444@@ -42,11 +42,13 @@ do { \
32445 int __d0; \
32446 might_fault(); \
32447 __asm__ __volatile__( \
32448+ __COPYUSER_SET_ES \
32449 ASM_STAC "\n" \
32450 "0: rep; stosl\n" \
32451 " movl %2,%0\n" \
32452 "1: rep; stosb\n" \
32453 "2: " ASM_CLAC "\n" \
32454+ __COPYUSER_RESTORE_ES \
32455 ".section .fixup,\"ax\"\n" \
32456 "3: lea 0(%2,%0,4),%0\n" \
32457 " jmp 2b\n" \
32458@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
32459
32460 #ifdef CONFIG_X86_INTEL_USERCOPY
32461 static unsigned long
32462-__copy_user_intel(void __user *to, const void *from, unsigned long size)
32463+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
32464 {
32465 int d0, d1;
32466 __asm__ __volatile__(
32467@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
32468 " .align 2,0x90\n"
32469 "3: movl 0(%4), %%eax\n"
32470 "4: movl 4(%4), %%edx\n"
32471- "5: movl %%eax, 0(%3)\n"
32472- "6: movl %%edx, 4(%3)\n"
32473+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
32474+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
32475 "7: movl 8(%4), %%eax\n"
32476 "8: movl 12(%4),%%edx\n"
32477- "9: movl %%eax, 8(%3)\n"
32478- "10: movl %%edx, 12(%3)\n"
32479+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
32480+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
32481 "11: movl 16(%4), %%eax\n"
32482 "12: movl 20(%4), %%edx\n"
32483- "13: movl %%eax, 16(%3)\n"
32484- "14: movl %%edx, 20(%3)\n"
32485+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
32486+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
32487 "15: movl 24(%4), %%eax\n"
32488 "16: movl 28(%4), %%edx\n"
32489- "17: movl %%eax, 24(%3)\n"
32490- "18: movl %%edx, 28(%3)\n"
32491+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
32492+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
32493 "19: movl 32(%4), %%eax\n"
32494 "20: movl 36(%4), %%edx\n"
32495- "21: movl %%eax, 32(%3)\n"
32496- "22: movl %%edx, 36(%3)\n"
32497+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
32498+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
32499 "23: movl 40(%4), %%eax\n"
32500 "24: movl 44(%4), %%edx\n"
32501- "25: movl %%eax, 40(%3)\n"
32502- "26: movl %%edx, 44(%3)\n"
32503+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
32504+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
32505 "27: movl 48(%4), %%eax\n"
32506 "28: movl 52(%4), %%edx\n"
32507- "29: movl %%eax, 48(%3)\n"
32508- "30: movl %%edx, 52(%3)\n"
32509+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
32510+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
32511 "31: movl 56(%4), %%eax\n"
32512 "32: movl 60(%4), %%edx\n"
32513- "33: movl %%eax, 56(%3)\n"
32514- "34: movl %%edx, 60(%3)\n"
32515+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
32516+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
32517 " addl $-64, %0\n"
32518 " addl $64, %4\n"
32519 " addl $64, %3\n"
32520@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
32521 " shrl $2, %0\n"
32522 " andl $3, %%eax\n"
32523 " cld\n"
32524+ __COPYUSER_SET_ES
32525 "99: rep; movsl\n"
32526 "36: movl %%eax, %0\n"
32527 "37: rep; movsb\n"
32528 "100:\n"
32529+ __COPYUSER_RESTORE_ES
32530+ ".section .fixup,\"ax\"\n"
32531+ "101: lea 0(%%eax,%0,4),%0\n"
32532+ " jmp 100b\n"
32533+ ".previous\n"
32534+ _ASM_EXTABLE(1b,100b)
32535+ _ASM_EXTABLE(2b,100b)
32536+ _ASM_EXTABLE(3b,100b)
32537+ _ASM_EXTABLE(4b,100b)
32538+ _ASM_EXTABLE(5b,100b)
32539+ _ASM_EXTABLE(6b,100b)
32540+ _ASM_EXTABLE(7b,100b)
32541+ _ASM_EXTABLE(8b,100b)
32542+ _ASM_EXTABLE(9b,100b)
32543+ _ASM_EXTABLE(10b,100b)
32544+ _ASM_EXTABLE(11b,100b)
32545+ _ASM_EXTABLE(12b,100b)
32546+ _ASM_EXTABLE(13b,100b)
32547+ _ASM_EXTABLE(14b,100b)
32548+ _ASM_EXTABLE(15b,100b)
32549+ _ASM_EXTABLE(16b,100b)
32550+ _ASM_EXTABLE(17b,100b)
32551+ _ASM_EXTABLE(18b,100b)
32552+ _ASM_EXTABLE(19b,100b)
32553+ _ASM_EXTABLE(20b,100b)
32554+ _ASM_EXTABLE(21b,100b)
32555+ _ASM_EXTABLE(22b,100b)
32556+ _ASM_EXTABLE(23b,100b)
32557+ _ASM_EXTABLE(24b,100b)
32558+ _ASM_EXTABLE(25b,100b)
32559+ _ASM_EXTABLE(26b,100b)
32560+ _ASM_EXTABLE(27b,100b)
32561+ _ASM_EXTABLE(28b,100b)
32562+ _ASM_EXTABLE(29b,100b)
32563+ _ASM_EXTABLE(30b,100b)
32564+ _ASM_EXTABLE(31b,100b)
32565+ _ASM_EXTABLE(32b,100b)
32566+ _ASM_EXTABLE(33b,100b)
32567+ _ASM_EXTABLE(34b,100b)
32568+ _ASM_EXTABLE(35b,100b)
32569+ _ASM_EXTABLE(36b,100b)
32570+ _ASM_EXTABLE(37b,100b)
32571+ _ASM_EXTABLE(99b,101b)
32572+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
32573+ : "1"(to), "2"(from), "0"(size)
32574+ : "eax", "edx", "memory");
32575+ return size;
32576+}
32577+
32578+static unsigned long
32579+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
32580+{
32581+ int d0, d1;
32582+ __asm__ __volatile__(
32583+ " .align 2,0x90\n"
32584+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
32585+ " cmpl $67, %0\n"
32586+ " jbe 3f\n"
32587+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
32588+ " .align 2,0x90\n"
32589+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
32590+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
32591+ "5: movl %%eax, 0(%3)\n"
32592+ "6: movl %%edx, 4(%3)\n"
32593+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
32594+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
32595+ "9: movl %%eax, 8(%3)\n"
32596+ "10: movl %%edx, 12(%3)\n"
32597+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
32598+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
32599+ "13: movl %%eax, 16(%3)\n"
32600+ "14: movl %%edx, 20(%3)\n"
32601+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
32602+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
32603+ "17: movl %%eax, 24(%3)\n"
32604+ "18: movl %%edx, 28(%3)\n"
32605+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
32606+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
32607+ "21: movl %%eax, 32(%3)\n"
32608+ "22: movl %%edx, 36(%3)\n"
32609+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
32610+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
32611+ "25: movl %%eax, 40(%3)\n"
32612+ "26: movl %%edx, 44(%3)\n"
32613+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
32614+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
32615+ "29: movl %%eax, 48(%3)\n"
32616+ "30: movl %%edx, 52(%3)\n"
32617+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
32618+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
32619+ "33: movl %%eax, 56(%3)\n"
32620+ "34: movl %%edx, 60(%3)\n"
32621+ " addl $-64, %0\n"
32622+ " addl $64, %4\n"
32623+ " addl $64, %3\n"
32624+ " cmpl $63, %0\n"
32625+ " ja 1b\n"
32626+ "35: movl %0, %%eax\n"
32627+ " shrl $2, %0\n"
32628+ " andl $3, %%eax\n"
32629+ " cld\n"
32630+ "99: rep; "__copyuser_seg" movsl\n"
32631+ "36: movl %%eax, %0\n"
32632+ "37: rep; "__copyuser_seg" movsb\n"
32633+ "100:\n"
32634 ".section .fixup,\"ax\"\n"
32635 "101: lea 0(%%eax,%0,4),%0\n"
32636 " jmp 100b\n"
32637@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
32638 int d0, d1;
32639 __asm__ __volatile__(
32640 " .align 2,0x90\n"
32641- "0: movl 32(%4), %%eax\n"
32642+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32643 " cmpl $67, %0\n"
32644 " jbe 2f\n"
32645- "1: movl 64(%4), %%eax\n"
32646+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32647 " .align 2,0x90\n"
32648- "2: movl 0(%4), %%eax\n"
32649- "21: movl 4(%4), %%edx\n"
32650+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32651+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32652 " movl %%eax, 0(%3)\n"
32653 " movl %%edx, 4(%3)\n"
32654- "3: movl 8(%4), %%eax\n"
32655- "31: movl 12(%4),%%edx\n"
32656+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32657+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32658 " movl %%eax, 8(%3)\n"
32659 " movl %%edx, 12(%3)\n"
32660- "4: movl 16(%4), %%eax\n"
32661- "41: movl 20(%4), %%edx\n"
32662+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32663+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32664 " movl %%eax, 16(%3)\n"
32665 " movl %%edx, 20(%3)\n"
32666- "10: movl 24(%4), %%eax\n"
32667- "51: movl 28(%4), %%edx\n"
32668+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32669+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32670 " movl %%eax, 24(%3)\n"
32671 " movl %%edx, 28(%3)\n"
32672- "11: movl 32(%4), %%eax\n"
32673- "61: movl 36(%4), %%edx\n"
32674+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32675+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32676 " movl %%eax, 32(%3)\n"
32677 " movl %%edx, 36(%3)\n"
32678- "12: movl 40(%4), %%eax\n"
32679- "71: movl 44(%4), %%edx\n"
32680+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32681+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32682 " movl %%eax, 40(%3)\n"
32683 " movl %%edx, 44(%3)\n"
32684- "13: movl 48(%4), %%eax\n"
32685- "81: movl 52(%4), %%edx\n"
32686+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32687+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32688 " movl %%eax, 48(%3)\n"
32689 " movl %%edx, 52(%3)\n"
32690- "14: movl 56(%4), %%eax\n"
32691- "91: movl 60(%4), %%edx\n"
32692+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32693+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32694 " movl %%eax, 56(%3)\n"
32695 " movl %%edx, 60(%3)\n"
32696 " addl $-64, %0\n"
32697@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
32698 " shrl $2, %0\n"
32699 " andl $3, %%eax\n"
32700 " cld\n"
32701- "6: rep; movsl\n"
32702+ "6: rep; "__copyuser_seg" movsl\n"
32703 " movl %%eax,%0\n"
32704- "7: rep; movsb\n"
32705+ "7: rep; "__copyuser_seg" movsb\n"
32706 "8:\n"
32707 ".section .fixup,\"ax\"\n"
32708 "9: lea 0(%%eax,%0,4),%0\n"
32709@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32710
32711 __asm__ __volatile__(
32712 " .align 2,0x90\n"
32713- "0: movl 32(%4), %%eax\n"
32714+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32715 " cmpl $67, %0\n"
32716 " jbe 2f\n"
32717- "1: movl 64(%4), %%eax\n"
32718+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32719 " .align 2,0x90\n"
32720- "2: movl 0(%4), %%eax\n"
32721- "21: movl 4(%4), %%edx\n"
32722+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32723+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32724 " movnti %%eax, 0(%3)\n"
32725 " movnti %%edx, 4(%3)\n"
32726- "3: movl 8(%4), %%eax\n"
32727- "31: movl 12(%4),%%edx\n"
32728+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32729+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32730 " movnti %%eax, 8(%3)\n"
32731 " movnti %%edx, 12(%3)\n"
32732- "4: movl 16(%4), %%eax\n"
32733- "41: movl 20(%4), %%edx\n"
32734+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32735+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32736 " movnti %%eax, 16(%3)\n"
32737 " movnti %%edx, 20(%3)\n"
32738- "10: movl 24(%4), %%eax\n"
32739- "51: movl 28(%4), %%edx\n"
32740+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32741+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32742 " movnti %%eax, 24(%3)\n"
32743 " movnti %%edx, 28(%3)\n"
32744- "11: movl 32(%4), %%eax\n"
32745- "61: movl 36(%4), %%edx\n"
32746+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32747+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32748 " movnti %%eax, 32(%3)\n"
32749 " movnti %%edx, 36(%3)\n"
32750- "12: movl 40(%4), %%eax\n"
32751- "71: movl 44(%4), %%edx\n"
32752+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32753+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32754 " movnti %%eax, 40(%3)\n"
32755 " movnti %%edx, 44(%3)\n"
32756- "13: movl 48(%4), %%eax\n"
32757- "81: movl 52(%4), %%edx\n"
32758+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32759+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32760 " movnti %%eax, 48(%3)\n"
32761 " movnti %%edx, 52(%3)\n"
32762- "14: movl 56(%4), %%eax\n"
32763- "91: movl 60(%4), %%edx\n"
32764+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32765+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32766 " movnti %%eax, 56(%3)\n"
32767 " movnti %%edx, 60(%3)\n"
32768 " addl $-64, %0\n"
32769@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32770 " shrl $2, %0\n"
32771 " andl $3, %%eax\n"
32772 " cld\n"
32773- "6: rep; movsl\n"
32774+ "6: rep; "__copyuser_seg" movsl\n"
32775 " movl %%eax,%0\n"
32776- "7: rep; movsb\n"
32777+ "7: rep; "__copyuser_seg" movsb\n"
32778 "8:\n"
32779 ".section .fixup,\"ax\"\n"
32780 "9: lea 0(%%eax,%0,4),%0\n"
32781@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
32782
32783 __asm__ __volatile__(
32784 " .align 2,0x90\n"
32785- "0: movl 32(%4), %%eax\n"
32786+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32787 " cmpl $67, %0\n"
32788 " jbe 2f\n"
32789- "1: movl 64(%4), %%eax\n"
32790+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32791 " .align 2,0x90\n"
32792- "2: movl 0(%4), %%eax\n"
32793- "21: movl 4(%4), %%edx\n"
32794+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32795+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32796 " movnti %%eax, 0(%3)\n"
32797 " movnti %%edx, 4(%3)\n"
32798- "3: movl 8(%4), %%eax\n"
32799- "31: movl 12(%4),%%edx\n"
32800+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32801+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32802 " movnti %%eax, 8(%3)\n"
32803 " movnti %%edx, 12(%3)\n"
32804- "4: movl 16(%4), %%eax\n"
32805- "41: movl 20(%4), %%edx\n"
32806+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32807+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32808 " movnti %%eax, 16(%3)\n"
32809 " movnti %%edx, 20(%3)\n"
32810- "10: movl 24(%4), %%eax\n"
32811- "51: movl 28(%4), %%edx\n"
32812+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32813+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32814 " movnti %%eax, 24(%3)\n"
32815 " movnti %%edx, 28(%3)\n"
32816- "11: movl 32(%4), %%eax\n"
32817- "61: movl 36(%4), %%edx\n"
32818+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32819+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32820 " movnti %%eax, 32(%3)\n"
32821 " movnti %%edx, 36(%3)\n"
32822- "12: movl 40(%4), %%eax\n"
32823- "71: movl 44(%4), %%edx\n"
32824+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32825+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32826 " movnti %%eax, 40(%3)\n"
32827 " movnti %%edx, 44(%3)\n"
32828- "13: movl 48(%4), %%eax\n"
32829- "81: movl 52(%4), %%edx\n"
32830+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32831+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32832 " movnti %%eax, 48(%3)\n"
32833 " movnti %%edx, 52(%3)\n"
32834- "14: movl 56(%4), %%eax\n"
32835- "91: movl 60(%4), %%edx\n"
32836+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32837+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32838 " movnti %%eax, 56(%3)\n"
32839 " movnti %%edx, 60(%3)\n"
32840 " addl $-64, %0\n"
32841@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
32842 " shrl $2, %0\n"
32843 " andl $3, %%eax\n"
32844 " cld\n"
32845- "6: rep; movsl\n"
32846+ "6: rep; "__copyuser_seg" movsl\n"
32847 " movl %%eax,%0\n"
32848- "7: rep; movsb\n"
32849+ "7: rep; "__copyuser_seg" movsb\n"
32850 "8:\n"
32851 ".section .fixup,\"ax\"\n"
32852 "9: lea 0(%%eax,%0,4),%0\n"
32853@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
32854 */
32855 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
32856 unsigned long size);
32857-unsigned long __copy_user_intel(void __user *to, const void *from,
32858+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
32859+ unsigned long size);
32860+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
32861 unsigned long size);
32862 unsigned long __copy_user_zeroing_intel_nocache(void *to,
32863 const void __user *from, unsigned long size);
32864 #endif /* CONFIG_X86_INTEL_USERCOPY */
32865
32866 /* Generic arbitrary sized copy. */
32867-#define __copy_user(to, from, size) \
32868+#define __copy_user(to, from, size, prefix, set, restore) \
32869 do { \
32870 int __d0, __d1, __d2; \
32871 __asm__ __volatile__( \
32872+ set \
32873 " cmp $7,%0\n" \
32874 " jbe 1f\n" \
32875 " movl %1,%0\n" \
32876 " negl %0\n" \
32877 " andl $7,%0\n" \
32878 " subl %0,%3\n" \
32879- "4: rep; movsb\n" \
32880+ "4: rep; "prefix"movsb\n" \
32881 " movl %3,%0\n" \
32882 " shrl $2,%0\n" \
32883 " andl $3,%3\n" \
32884 " .align 2,0x90\n" \
32885- "0: rep; movsl\n" \
32886+ "0: rep; "prefix"movsl\n" \
32887 " movl %3,%0\n" \
32888- "1: rep; movsb\n" \
32889+ "1: rep; "prefix"movsb\n" \
32890 "2:\n" \
32891+ restore \
32892 ".section .fixup,\"ax\"\n" \
32893 "5: addl %3,%0\n" \
32894 " jmp 2b\n" \
32895@@ -538,14 +650,14 @@ do { \
32896 " negl %0\n" \
32897 " andl $7,%0\n" \
32898 " subl %0,%3\n" \
32899- "4: rep; movsb\n" \
32900+ "4: rep; "__copyuser_seg"movsb\n" \
32901 " movl %3,%0\n" \
32902 " shrl $2,%0\n" \
32903 " andl $3,%3\n" \
32904 " .align 2,0x90\n" \
32905- "0: rep; movsl\n" \
32906+ "0: rep; "__copyuser_seg"movsl\n" \
32907 " movl %3,%0\n" \
32908- "1: rep; movsb\n" \
32909+ "1: rep; "__copyuser_seg"movsb\n" \
32910 "2:\n" \
32911 ".section .fixup,\"ax\"\n" \
32912 "5: addl %3,%0\n" \
32913@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
32914 {
32915 stac();
32916 if (movsl_is_ok(to, from, n))
32917- __copy_user(to, from, n);
32918+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
32919 else
32920- n = __copy_user_intel(to, from, n);
32921+ n = __generic_copy_to_user_intel(to, from, n);
32922 clac();
32923 return n;
32924 }
32925@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
32926 {
32927 stac();
32928 if (movsl_is_ok(to, from, n))
32929- __copy_user(to, from, n);
32930+ __copy_user(to, from, n, __copyuser_seg, "", "");
32931 else
32932- n = __copy_user_intel((void __user *)to,
32933- (const void *)from, n);
32934+ n = __generic_copy_from_user_intel(to, from, n);
32935 clac();
32936 return n;
32937 }
32938@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
32939 if (n > 64 && cpu_has_xmm2)
32940 n = __copy_user_intel_nocache(to, from, n);
32941 else
32942- __copy_user(to, from, n);
32943+ __copy_user(to, from, n, __copyuser_seg, "", "");
32944 #else
32945- __copy_user(to, from, n);
32946+ __copy_user(to, from, n, __copyuser_seg, "", "");
32947 #endif
32948 clac();
32949 return n;
32950 }
32951 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
32952
32953-/**
32954- * copy_to_user: - Copy a block of data into user space.
32955- * @to: Destination address, in user space.
32956- * @from: Source address, in kernel space.
32957- * @n: Number of bytes to copy.
32958- *
32959- * Context: User context only. This function may sleep.
32960- *
32961- * Copy data from kernel space to user space.
32962- *
32963- * Returns number of bytes that could not be copied.
32964- * On success, this will be zero.
32965- */
32966-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
32967+#ifdef CONFIG_PAX_MEMORY_UDEREF
32968+void __set_fs(mm_segment_t x)
32969 {
32970- if (access_ok(VERIFY_WRITE, to, n))
32971- n = __copy_to_user(to, from, n);
32972- return n;
32973+ switch (x.seg) {
32974+ case 0:
32975+ loadsegment(gs, 0);
32976+ break;
32977+ case TASK_SIZE_MAX:
32978+ loadsegment(gs, __USER_DS);
32979+ break;
32980+ case -1UL:
32981+ loadsegment(gs, __KERNEL_DS);
32982+ break;
32983+ default:
32984+ BUG();
32985+ }
32986 }
32987-EXPORT_SYMBOL(_copy_to_user);
32988+EXPORT_SYMBOL(__set_fs);
32989
32990-/**
32991- * copy_from_user: - Copy a block of data from user space.
32992- * @to: Destination address, in kernel space.
32993- * @from: Source address, in user space.
32994- * @n: Number of bytes to copy.
32995- *
32996- * Context: User context only. This function may sleep.
32997- *
32998- * Copy data from user space to kernel space.
32999- *
33000- * Returns number of bytes that could not be copied.
33001- * On success, this will be zero.
33002- *
33003- * If some data could not be copied, this function will pad the copied
33004- * data to the requested size using zero bytes.
33005- */
33006-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
33007+void set_fs(mm_segment_t x)
33008 {
33009- if (access_ok(VERIFY_READ, from, n))
33010- n = __copy_from_user(to, from, n);
33011- else
33012- memset(to, 0, n);
33013- return n;
33014+ current_thread_info()->addr_limit = x;
33015+ __set_fs(x);
33016 }
33017-EXPORT_SYMBOL(_copy_from_user);
33018+EXPORT_SYMBOL(set_fs);
33019+#endif
33020diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
33021index c905e89..01ab928 100644
33022--- a/arch/x86/lib/usercopy_64.c
33023+++ b/arch/x86/lib/usercopy_64.c
33024@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
33025 might_fault();
33026 /* no memory constraint because it doesn't change any memory gcc knows
33027 about */
33028+ pax_open_userland();
33029 stac();
33030 asm volatile(
33031 " testq %[size8],%[size8]\n"
33032@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
33033 _ASM_EXTABLE(0b,3b)
33034 _ASM_EXTABLE(1b,2b)
33035 : [size8] "=&c"(size), [dst] "=&D" (__d0)
33036- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
33037+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
33038 [zero] "r" (0UL), [eight] "r" (8UL));
33039 clac();
33040+ pax_close_userland();
33041 return size;
33042 }
33043 EXPORT_SYMBOL(__clear_user);
33044@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
33045 }
33046 EXPORT_SYMBOL(clear_user);
33047
33048-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
33049+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
33050 {
33051- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
33052- return copy_user_generic((__force void *)to, (__force void *)from, len);
33053- }
33054- return len;
33055+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
33056+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
33057+ return len;
33058 }
33059 EXPORT_SYMBOL(copy_in_user);
33060
33061@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
33062 * it is not necessary to optimize tail handling.
33063 */
33064 __visible unsigned long
33065-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
33066+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
33067 {
33068 char c;
33069 unsigned zero_len;
33070
33071+ clac();
33072+ pax_close_userland();
33073 for (; len; --len, to++) {
33074 if (__get_user_nocheck(c, from++, sizeof(char)))
33075 break;
33076@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
33077 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
33078 if (__put_user_nocheck(c, to++, sizeof(char)))
33079 break;
33080- clac();
33081 return len;
33082 }
33083diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
33084index 6a19ad9..1c48f9a 100644
33085--- a/arch/x86/mm/Makefile
33086+++ b/arch/x86/mm/Makefile
33087@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
33088 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
33089
33090 obj-$(CONFIG_MEMTEST) += memtest.o
33091+
33092+quote:="
33093+obj-$(CONFIG_X86_64) += uderef_64.o
33094+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
33095diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
33096index 903ec1e..c4166b2 100644
33097--- a/arch/x86/mm/extable.c
33098+++ b/arch/x86/mm/extable.c
33099@@ -6,12 +6,24 @@
33100 static inline unsigned long
33101 ex_insn_addr(const struct exception_table_entry *x)
33102 {
33103- return (unsigned long)&x->insn + x->insn;
33104+ unsigned long reloc = 0;
33105+
33106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33107+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33108+#endif
33109+
33110+ return (unsigned long)&x->insn + x->insn + reloc;
33111 }
33112 static inline unsigned long
33113 ex_fixup_addr(const struct exception_table_entry *x)
33114 {
33115- return (unsigned long)&x->fixup + x->fixup;
33116+ unsigned long reloc = 0;
33117+
33118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33119+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33120+#endif
33121+
33122+ return (unsigned long)&x->fixup + x->fixup + reloc;
33123 }
33124
33125 int fixup_exception(struct pt_regs *regs)
33126@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
33127 unsigned long new_ip;
33128
33129 #ifdef CONFIG_PNPBIOS
33130- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
33131+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
33132 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
33133 extern u32 pnp_bios_is_utter_crap;
33134 pnp_bios_is_utter_crap = 1;
33135@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
33136 i += 4;
33137 p->fixup -= i;
33138 i += 4;
33139+
33140+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33141+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
33142+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33143+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
33144+#endif
33145+
33146 }
33147 }
33148
33149diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
33150index a241946..d7a04cf 100644
33151--- a/arch/x86/mm/fault.c
33152+++ b/arch/x86/mm/fault.c
33153@@ -14,12 +14,19 @@
33154 #include <linux/hugetlb.h> /* hstate_index_to_shift */
33155 #include <linux/prefetch.h> /* prefetchw */
33156 #include <linux/context_tracking.h> /* exception_enter(), ... */
33157+#include <linux/unistd.h>
33158+#include <linux/compiler.h>
33159
33160 #include <asm/traps.h> /* dotraplinkage, ... */
33161 #include <asm/pgalloc.h> /* pgd_*(), ... */
33162 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
33163 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
33164 #include <asm/vsyscall.h> /* emulate_vsyscall */
33165+#include <asm/tlbflush.h>
33166+
33167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33168+#include <asm/stacktrace.h>
33169+#endif
33170
33171 #define CREATE_TRACE_POINTS
33172 #include <asm/trace/exceptions.h>
33173@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
33174 int ret = 0;
33175
33176 /* kprobe_running() needs smp_processor_id() */
33177- if (kprobes_built_in() && !user_mode_vm(regs)) {
33178+ if (kprobes_built_in() && !user_mode(regs)) {
33179 preempt_disable();
33180 if (kprobe_running() && kprobe_fault_handler(regs, 14))
33181 ret = 1;
33182@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
33183 return !instr_lo || (instr_lo>>1) == 1;
33184 case 0x00:
33185 /* Prefetch instruction is 0x0F0D or 0x0F18 */
33186- if (probe_kernel_address(instr, opcode))
33187+ if (user_mode(regs)) {
33188+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
33189+ return 0;
33190+ } else if (probe_kernel_address(instr, opcode))
33191 return 0;
33192
33193 *prefetch = (instr_lo == 0xF) &&
33194@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
33195 while (instr < max_instr) {
33196 unsigned char opcode;
33197
33198- if (probe_kernel_address(instr, opcode))
33199+ if (user_mode(regs)) {
33200+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
33201+ break;
33202+ } else if (probe_kernel_address(instr, opcode))
33203 break;
33204
33205 instr++;
33206@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
33207 force_sig_info(si_signo, &info, tsk);
33208 }
33209
33210+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33211+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
33212+#endif
33213+
33214+#ifdef CONFIG_PAX_EMUTRAMP
33215+static int pax_handle_fetch_fault(struct pt_regs *regs);
33216+#endif
33217+
33218+#ifdef CONFIG_PAX_PAGEEXEC
33219+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
33220+{
33221+ pgd_t *pgd;
33222+ pud_t *pud;
33223+ pmd_t *pmd;
33224+
33225+ pgd = pgd_offset(mm, address);
33226+ if (!pgd_present(*pgd))
33227+ return NULL;
33228+ pud = pud_offset(pgd, address);
33229+ if (!pud_present(*pud))
33230+ return NULL;
33231+ pmd = pmd_offset(pud, address);
33232+ if (!pmd_present(*pmd))
33233+ return NULL;
33234+ return pmd;
33235+}
33236+#endif
33237+
33238 DEFINE_SPINLOCK(pgd_lock);
33239 LIST_HEAD(pgd_list);
33240
33241@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
33242 for (address = VMALLOC_START & PMD_MASK;
33243 address >= TASK_SIZE && address < FIXADDR_TOP;
33244 address += PMD_SIZE) {
33245+
33246+#ifdef CONFIG_PAX_PER_CPU_PGD
33247+ unsigned long cpu;
33248+#else
33249 struct page *page;
33250+#endif
33251
33252 spin_lock(&pgd_lock);
33253+
33254+#ifdef CONFIG_PAX_PER_CPU_PGD
33255+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33256+ pgd_t *pgd = get_cpu_pgd(cpu, user);
33257+ pmd_t *ret;
33258+
33259+ ret = vmalloc_sync_one(pgd, address);
33260+ if (!ret)
33261+ break;
33262+ pgd = get_cpu_pgd(cpu, kernel);
33263+#else
33264 list_for_each_entry(page, &pgd_list, lru) {
33265+ pgd_t *pgd;
33266 spinlock_t *pgt_lock;
33267 pmd_t *ret;
33268
33269@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
33270 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33271
33272 spin_lock(pgt_lock);
33273- ret = vmalloc_sync_one(page_address(page), address);
33274+ pgd = page_address(page);
33275+#endif
33276+
33277+ ret = vmalloc_sync_one(pgd, address);
33278+
33279+#ifndef CONFIG_PAX_PER_CPU_PGD
33280 spin_unlock(pgt_lock);
33281+#endif
33282
33283 if (!ret)
33284 break;
33285@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
33286 * an interrupt in the middle of a task switch..
33287 */
33288 pgd_paddr = read_cr3();
33289+
33290+#ifdef CONFIG_PAX_PER_CPU_PGD
33291+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
33292+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
33293+#endif
33294+
33295 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
33296 if (!pmd_k)
33297 return -1;
33298@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
33299 * happen within a race in page table update. In the later
33300 * case just flush:
33301 */
33302- pgd = pgd_offset(current->active_mm, address);
33303+
33304 pgd_ref = pgd_offset_k(address);
33305 if (pgd_none(*pgd_ref))
33306 return -1;
33307
33308+#ifdef CONFIG_PAX_PER_CPU_PGD
33309+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
33310+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
33311+ if (pgd_none(*pgd)) {
33312+ set_pgd(pgd, *pgd_ref);
33313+ arch_flush_lazy_mmu_mode();
33314+ } else {
33315+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
33316+ }
33317+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
33318+#else
33319+ pgd = pgd_offset(current->active_mm, address);
33320+#endif
33321+
33322 if (pgd_none(*pgd)) {
33323 set_pgd(pgd, *pgd_ref);
33324 arch_flush_lazy_mmu_mode();
33325@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
33326 static int is_errata100(struct pt_regs *regs, unsigned long address)
33327 {
33328 #ifdef CONFIG_X86_64
33329- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
33330+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
33331 return 1;
33332 #endif
33333 return 0;
33334@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
33335 }
33336
33337 static const char nx_warning[] = KERN_CRIT
33338-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
33339+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
33340 static const char smep_warning[] = KERN_CRIT
33341-"unable to execute userspace code (SMEP?) (uid: %d)\n";
33342+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
33343
33344 static void
33345 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33346@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33347 if (!oops_may_print())
33348 return;
33349
33350- if (error_code & PF_INSTR) {
33351+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
33352 unsigned int level;
33353 pgd_t *pgd;
33354 pte_t *pte;
33355@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
33356 pte = lookup_address_in_pgd(pgd, address, &level);
33357
33358 if (pte && pte_present(*pte) && !pte_exec(*pte))
33359- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
33360+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
33361 if (pte && pte_present(*pte) && pte_exec(*pte) &&
33362 (pgd_flags(*pgd) & _PAGE_USER) &&
33363 (read_cr4() & X86_CR4_SMEP))
33364- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
33365+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
33366 }
33367
33368+#ifdef CONFIG_PAX_KERNEXEC
33369+ if (init_mm.start_code <= address && address < init_mm.end_code) {
33370+ if (current->signal->curr_ip)
33371+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
33372+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
33373+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
33374+ else
33375+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
33376+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
33377+ }
33378+#endif
33379+
33380 printk(KERN_ALERT "BUG: unable to handle kernel ");
33381 if (address < PAGE_SIZE)
33382 printk(KERN_CONT "NULL pointer dereference");
33383@@ -785,6 +881,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
33384 return;
33385 }
33386 #endif
33387+
33388+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33389+ if (pax_is_fetch_fault(regs, error_code, address)) {
33390+
33391+#ifdef CONFIG_PAX_EMUTRAMP
33392+ switch (pax_handle_fetch_fault(regs)) {
33393+ case 2:
33394+ return;
33395+ }
33396+#endif
33397+
33398+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
33399+ do_group_exit(SIGKILL);
33400+ }
33401+#endif
33402+
33403 /* Kernel addresses are always protection faults: */
33404 if (address >= TASK_SIZE)
33405 error_code |= PF_PROT;
33406@@ -870,7 +982,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
33407 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
33408 printk(KERN_ERR
33409 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
33410- tsk->comm, tsk->pid, address);
33411+ tsk->comm, task_pid_nr(tsk), address);
33412 code = BUS_MCEERR_AR;
33413 }
33414 #endif
33415@@ -924,6 +1036,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
33416 return 1;
33417 }
33418
33419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
33420+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
33421+{
33422+ pte_t *pte;
33423+ pmd_t *pmd;
33424+ spinlock_t *ptl;
33425+ unsigned char pte_mask;
33426+
33427+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
33428+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
33429+ return 0;
33430+
33431+ /* PaX: it's our fault, let's handle it if we can */
33432+
33433+ /* PaX: take a look at read faults before acquiring any locks */
33434+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
33435+ /* instruction fetch attempt from a protected page in user mode */
33436+ up_read(&mm->mmap_sem);
33437+
33438+#ifdef CONFIG_PAX_EMUTRAMP
33439+ switch (pax_handle_fetch_fault(regs)) {
33440+ case 2:
33441+ return 1;
33442+ }
33443+#endif
33444+
33445+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
33446+ do_group_exit(SIGKILL);
33447+ }
33448+
33449+ pmd = pax_get_pmd(mm, address);
33450+ if (unlikely(!pmd))
33451+ return 0;
33452+
33453+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
33454+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
33455+ pte_unmap_unlock(pte, ptl);
33456+ return 0;
33457+ }
33458+
33459+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
33460+ /* write attempt to a protected page in user mode */
33461+ pte_unmap_unlock(pte, ptl);
33462+ return 0;
33463+ }
33464+
33465+#ifdef CONFIG_SMP
33466+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
33467+#else
33468+ if (likely(address > get_limit(regs->cs)))
33469+#endif
33470+ {
33471+ set_pte(pte, pte_mkread(*pte));
33472+ __flush_tlb_one(address);
33473+ pte_unmap_unlock(pte, ptl);
33474+ up_read(&mm->mmap_sem);
33475+ return 1;
33476+ }
33477+
33478+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
33479+
33480+ /*
33481+ * PaX: fill DTLB with user rights and retry
33482+ */
33483+ __asm__ __volatile__ (
33484+ "orb %2,(%1)\n"
33485+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
33486+/*
33487+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
33488+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
33489+ * page fault when examined during a TLB load attempt. this is true not only
33490+ * for PTEs holding a non-present entry but also present entries that will
33491+ * raise a page fault (such as those set up by PaX, or the copy-on-write
33492+ * mechanism). in effect it means that we do *not* need to flush the TLBs
33493+ * for our target pages since their PTEs are simply not in the TLBs at all.
33494+
33495+ * the best thing in omitting it is that we gain around 15-20% speed in the
33496+ * fast path of the page fault handler and can get rid of tracing since we
33497+ * can no longer flush unintended entries.
33498+ */
33499+ "invlpg (%0)\n"
33500+#endif
33501+ __copyuser_seg"testb $0,(%0)\n"
33502+ "xorb %3,(%1)\n"
33503+ :
33504+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
33505+ : "memory", "cc");
33506+ pte_unmap_unlock(pte, ptl);
33507+ up_read(&mm->mmap_sem);
33508+ return 1;
33509+}
33510+#endif
33511+
33512 /*
33513 * Handle a spurious fault caused by a stale TLB entry.
33514 *
33515@@ -991,6 +1196,9 @@ int show_unhandled_signals = 1;
33516 static inline int
33517 access_error(unsigned long error_code, struct vm_area_struct *vma)
33518 {
33519+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
33520+ return 1;
33521+
33522 if (error_code & PF_WRITE) {
33523 /* write, present and write, not present: */
33524 if (unlikely(!(vma->vm_flags & VM_WRITE)))
33525@@ -1025,7 +1233,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
33526 if (error_code & PF_USER)
33527 return false;
33528
33529- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
33530+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
33531 return false;
33532
33533 return true;
33534@@ -1053,6 +1261,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
33535 tsk = current;
33536 mm = tsk->mm;
33537
33538+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33539+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
33540+ if (!search_exception_tables(regs->ip)) {
33541+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
33542+ bad_area_nosemaphore(regs, error_code, address);
33543+ return;
33544+ }
33545+ if (address < pax_user_shadow_base) {
33546+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
33547+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
33548+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
33549+ } else
33550+ address -= pax_user_shadow_base;
33551+ }
33552+#endif
33553+
33554 /*
33555 * Detect and handle instructions that would cause a page fault for
33556 * both a tracked kernel page and a userspace page.
33557@@ -1130,7 +1354,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
33558 * User-mode registers count as a user access even for any
33559 * potential system fault or CPU buglet:
33560 */
33561- if (user_mode_vm(regs)) {
33562+ if (user_mode(regs)) {
33563 local_irq_enable();
33564 error_code |= PF_USER;
33565 flags |= FAULT_FLAG_USER;
33566@@ -1177,6 +1401,11 @@ retry:
33567 might_sleep();
33568 }
33569
33570+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
33571+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
33572+ return;
33573+#endif
33574+
33575 vma = find_vma(mm, address);
33576 if (unlikely(!vma)) {
33577 bad_area(regs, error_code, address);
33578@@ -1188,18 +1417,24 @@ retry:
33579 bad_area(regs, error_code, address);
33580 return;
33581 }
33582- if (error_code & PF_USER) {
33583- /*
33584- * Accessing the stack below %sp is always a bug.
33585- * The large cushion allows instructions like enter
33586- * and pusha to work. ("enter $65535, $31" pushes
33587- * 32 pointers and then decrements %sp by 65535.)
33588- */
33589- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
33590- bad_area(regs, error_code, address);
33591- return;
33592- }
33593+ /*
33594+ * Accessing the stack below %sp is always a bug.
33595+ * The large cushion allows instructions like enter
33596+ * and pusha to work. ("enter $65535, $31" pushes
33597+ * 32 pointers and then decrements %sp by 65535.)
33598+ */
33599+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
33600+ bad_area(regs, error_code, address);
33601+ return;
33602 }
33603+
33604+#ifdef CONFIG_PAX_SEGMEXEC
33605+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
33606+ bad_area(regs, error_code, address);
33607+ return;
33608+ }
33609+#endif
33610+
33611 if (unlikely(expand_stack(vma, address))) {
33612 bad_area(regs, error_code, address);
33613 return;
33614@@ -1316,3 +1551,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
33615 }
33616 NOKPROBE_SYMBOL(trace_do_page_fault);
33617 #endif /* CONFIG_TRACING */
33618+
33619+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33620+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
33621+{
33622+ struct mm_struct *mm = current->mm;
33623+ unsigned long ip = regs->ip;
33624+
33625+ if (v8086_mode(regs))
33626+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
33627+
33628+#ifdef CONFIG_PAX_PAGEEXEC
33629+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
33630+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
33631+ return true;
33632+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
33633+ return true;
33634+ return false;
33635+ }
33636+#endif
33637+
33638+#ifdef CONFIG_PAX_SEGMEXEC
33639+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
33640+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
33641+ return true;
33642+ return false;
33643+ }
33644+#endif
33645+
33646+ return false;
33647+}
33648+#endif
33649+
33650+#ifdef CONFIG_PAX_EMUTRAMP
33651+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
33652+{
33653+ int err;
33654+
33655+ do { /* PaX: libffi trampoline emulation */
33656+ unsigned char mov, jmp;
33657+ unsigned int addr1, addr2;
33658+
33659+#ifdef CONFIG_X86_64
33660+ if ((regs->ip + 9) >> 32)
33661+ break;
33662+#endif
33663+
33664+ err = get_user(mov, (unsigned char __user *)regs->ip);
33665+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33666+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
33667+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33668+
33669+ if (err)
33670+ break;
33671+
33672+ if (mov == 0xB8 && jmp == 0xE9) {
33673+ regs->ax = addr1;
33674+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
33675+ return 2;
33676+ }
33677+ } while (0);
33678+
33679+ do { /* PaX: gcc trampoline emulation #1 */
33680+ unsigned char mov1, mov2;
33681+ unsigned short jmp;
33682+ unsigned int addr1, addr2;
33683+
33684+#ifdef CONFIG_X86_64
33685+ if ((regs->ip + 11) >> 32)
33686+ break;
33687+#endif
33688+
33689+ err = get_user(mov1, (unsigned char __user *)regs->ip);
33690+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33691+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
33692+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33693+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
33694+
33695+ if (err)
33696+ break;
33697+
33698+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
33699+ regs->cx = addr1;
33700+ regs->ax = addr2;
33701+ regs->ip = addr2;
33702+ return 2;
33703+ }
33704+ } while (0);
33705+
33706+ do { /* PaX: gcc trampoline emulation #2 */
33707+ unsigned char mov, jmp;
33708+ unsigned int addr1, addr2;
33709+
33710+#ifdef CONFIG_X86_64
33711+ if ((regs->ip + 9) >> 32)
33712+ break;
33713+#endif
33714+
33715+ err = get_user(mov, (unsigned char __user *)regs->ip);
33716+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
33717+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
33718+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
33719+
33720+ if (err)
33721+ break;
33722+
33723+ if (mov == 0xB9 && jmp == 0xE9) {
33724+ regs->cx = addr1;
33725+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
33726+ return 2;
33727+ }
33728+ } while (0);
33729+
33730+ return 1; /* PaX in action */
33731+}
33732+
33733+#ifdef CONFIG_X86_64
33734+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
33735+{
33736+ int err;
33737+
33738+ do { /* PaX: libffi trampoline emulation */
33739+ unsigned short mov1, mov2, jmp1;
33740+ unsigned char stcclc, jmp2;
33741+ unsigned long addr1, addr2;
33742+
33743+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33744+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33745+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33746+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33747+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
33748+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
33749+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
33750+
33751+ if (err)
33752+ break;
33753+
33754+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33755+ regs->r11 = addr1;
33756+ regs->r10 = addr2;
33757+ if (stcclc == 0xF8)
33758+ regs->flags &= ~X86_EFLAGS_CF;
33759+ else
33760+ regs->flags |= X86_EFLAGS_CF;
33761+ regs->ip = addr1;
33762+ return 2;
33763+ }
33764+ } while (0);
33765+
33766+ do { /* PaX: gcc trampoline emulation #1 */
33767+ unsigned short mov1, mov2, jmp1;
33768+ unsigned char jmp2;
33769+ unsigned int addr1;
33770+ unsigned long addr2;
33771+
33772+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33773+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
33774+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
33775+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
33776+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
33777+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
33778+
33779+ if (err)
33780+ break;
33781+
33782+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33783+ regs->r11 = addr1;
33784+ regs->r10 = addr2;
33785+ regs->ip = addr1;
33786+ return 2;
33787+ }
33788+ } while (0);
33789+
33790+ do { /* PaX: gcc trampoline emulation #2 */
33791+ unsigned short mov1, mov2, jmp1;
33792+ unsigned char jmp2;
33793+ unsigned long addr1, addr2;
33794+
33795+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33796+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33797+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33798+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33799+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
33800+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
33801+
33802+ if (err)
33803+ break;
33804+
33805+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33806+ regs->r11 = addr1;
33807+ regs->r10 = addr2;
33808+ regs->ip = addr1;
33809+ return 2;
33810+ }
33811+ } while (0);
33812+
33813+ return 1; /* PaX in action */
33814+}
33815+#endif
33816+
33817+/*
33818+ * PaX: decide what to do with offenders (regs->ip = fault address)
33819+ *
33820+ * returns 1 when task should be killed
33821+ * 2 when gcc trampoline was detected
33822+ */
33823+static int pax_handle_fetch_fault(struct pt_regs *regs)
33824+{
33825+ if (v8086_mode(regs))
33826+ return 1;
33827+
33828+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
33829+ return 1;
33830+
33831+#ifdef CONFIG_X86_32
33832+ return pax_handle_fetch_fault_32(regs);
33833+#else
33834+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
33835+ return pax_handle_fetch_fault_32(regs);
33836+ else
33837+ return pax_handle_fetch_fault_64(regs);
33838+#endif
33839+}
33840+#endif
33841+
33842+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33843+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
33844+{
33845+ long i;
33846+
33847+ printk(KERN_ERR "PAX: bytes at PC: ");
33848+ for (i = 0; i < 20; i++) {
33849+ unsigned char c;
33850+ if (get_user(c, (unsigned char __force_user *)pc+i))
33851+ printk(KERN_CONT "?? ");
33852+ else
33853+ printk(KERN_CONT "%02x ", c);
33854+ }
33855+ printk("\n");
33856+
33857+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
33858+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
33859+ unsigned long c;
33860+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
33861+#ifdef CONFIG_X86_32
33862+ printk(KERN_CONT "???????? ");
33863+#else
33864+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
33865+ printk(KERN_CONT "???????? ???????? ");
33866+ else
33867+ printk(KERN_CONT "???????????????? ");
33868+#endif
33869+ } else {
33870+#ifdef CONFIG_X86_64
33871+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
33872+ printk(KERN_CONT "%08x ", (unsigned int)c);
33873+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
33874+ } else
33875+#endif
33876+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
33877+ }
33878+ }
33879+ printk("\n");
33880+}
33881+#endif
33882+
33883+/**
33884+ * probe_kernel_write(): safely attempt to write to a location
33885+ * @dst: address to write to
33886+ * @src: pointer to the data that shall be written
33887+ * @size: size of the data chunk
33888+ *
33889+ * Safely write to address @dst from the buffer at @src. If a kernel fault
33890+ * happens, handle that and return -EFAULT.
33891+ */
33892+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
33893+{
33894+ long ret;
33895+ mm_segment_t old_fs = get_fs();
33896+
33897+ set_fs(KERNEL_DS);
33898+ pagefault_disable();
33899+ pax_open_kernel();
33900+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
33901+ pax_close_kernel();
33902+ pagefault_enable();
33903+ set_fs(old_fs);
33904+
33905+ return ret ? -EFAULT : 0;
33906+}
33907diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
33908index 207d9aef..69030980 100644
33909--- a/arch/x86/mm/gup.c
33910+++ b/arch/x86/mm/gup.c
33911@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
33912 addr = start;
33913 len = (unsigned long) nr_pages << PAGE_SHIFT;
33914 end = start + len;
33915- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
33916+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33917 (void __user *)start, len)))
33918 return 0;
33919
33920@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
33921 goto slow_irqon;
33922 #endif
33923
33924+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33925+ (void __user *)start, len)))
33926+ return 0;
33927+
33928 /*
33929 * XXX: batch / limit 'nr', to avoid large irq off latency
33930 * needs some instrumenting to determine the common sizes used by
33931diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
33932index 4500142..53a363c 100644
33933--- a/arch/x86/mm/highmem_32.c
33934+++ b/arch/x86/mm/highmem_32.c
33935@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
33936 idx = type + KM_TYPE_NR*smp_processor_id();
33937 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33938 BUG_ON(!pte_none(*(kmap_pte-idx)));
33939+
33940+ pax_open_kernel();
33941 set_pte(kmap_pte-idx, mk_pte(page, prot));
33942+ pax_close_kernel();
33943+
33944 arch_flush_lazy_mmu_mode();
33945
33946 return (void *)vaddr;
33947diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
33948index 8b977eb..4732c33 100644
33949--- a/arch/x86/mm/hugetlbpage.c
33950+++ b/arch/x86/mm/hugetlbpage.c
33951@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
33952 #ifdef CONFIG_HUGETLB_PAGE
33953 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
33954 unsigned long addr, unsigned long len,
33955- unsigned long pgoff, unsigned long flags)
33956+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33957 {
33958 struct hstate *h = hstate_file(file);
33959 struct vm_unmapped_area_info info;
33960-
33961+
33962 info.flags = 0;
33963 info.length = len;
33964 info.low_limit = current->mm->mmap_legacy_base;
33965 info.high_limit = TASK_SIZE;
33966 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33967 info.align_offset = 0;
33968+ info.threadstack_offset = offset;
33969 return vm_unmapped_area(&info);
33970 }
33971
33972 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33973 unsigned long addr0, unsigned long len,
33974- unsigned long pgoff, unsigned long flags)
33975+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33976 {
33977 struct hstate *h = hstate_file(file);
33978 struct vm_unmapped_area_info info;
33979@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33980 info.high_limit = current->mm->mmap_base;
33981 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33982 info.align_offset = 0;
33983+ info.threadstack_offset = offset;
33984 addr = vm_unmapped_area(&info);
33985
33986 /*
33987@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33988 VM_BUG_ON(addr != -ENOMEM);
33989 info.flags = 0;
33990 info.low_limit = TASK_UNMAPPED_BASE;
33991+
33992+#ifdef CONFIG_PAX_RANDMMAP
33993+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
33994+ info.low_limit += current->mm->delta_mmap;
33995+#endif
33996+
33997 info.high_limit = TASK_SIZE;
33998 addr = vm_unmapped_area(&info);
33999 }
34000@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
34001 struct hstate *h = hstate_file(file);
34002 struct mm_struct *mm = current->mm;
34003 struct vm_area_struct *vma;
34004+ unsigned long pax_task_size = TASK_SIZE;
34005+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
34006
34007 if (len & ~huge_page_mask(h))
34008 return -EINVAL;
34009- if (len > TASK_SIZE)
34010+
34011+#ifdef CONFIG_PAX_SEGMEXEC
34012+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34013+ pax_task_size = SEGMEXEC_TASK_SIZE;
34014+#endif
34015+
34016+ pax_task_size -= PAGE_SIZE;
34017+
34018+ if (len > pax_task_size)
34019 return -ENOMEM;
34020
34021 if (flags & MAP_FIXED) {
34022@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
34023 return addr;
34024 }
34025
34026+#ifdef CONFIG_PAX_RANDMMAP
34027+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34028+#endif
34029+
34030 if (addr) {
34031 addr = ALIGN(addr, huge_page_size(h));
34032 vma = find_vma(mm, addr);
34033- if (TASK_SIZE - len >= addr &&
34034- (!vma || addr + len <= vma->vm_start))
34035+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
34036 return addr;
34037 }
34038 if (mm->get_unmapped_area == arch_get_unmapped_area)
34039 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
34040- pgoff, flags);
34041+ pgoff, flags, offset);
34042 else
34043 return hugetlb_get_unmapped_area_topdown(file, addr, len,
34044- pgoff, flags);
34045+ pgoff, flags, offset);
34046 }
34047 #endif /* CONFIG_HUGETLB_PAGE */
34048
34049diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
34050index 66dba36..f8082ec 100644
34051--- a/arch/x86/mm/init.c
34052+++ b/arch/x86/mm/init.c
34053@@ -4,6 +4,7 @@
34054 #include <linux/swap.h>
34055 #include <linux/memblock.h>
34056 #include <linux/bootmem.h> /* for max_low_pfn */
34057+#include <linux/tboot.h>
34058
34059 #include <asm/cacheflush.h>
34060 #include <asm/e820.h>
34061@@ -17,6 +18,8 @@
34062 #include <asm/proto.h>
34063 #include <asm/dma.h> /* for MAX_DMA_PFN */
34064 #include <asm/microcode.h>
34065+#include <asm/desc.h>
34066+#include <asm/bios_ebda.h>
34067
34068 /*
34069 * We need to define the tracepoints somewhere, and tlb.c
34070@@ -570,7 +573,18 @@ void __init init_mem_mapping(void)
34071 early_ioremap_page_table_range_init();
34072 #endif
34073
34074+#ifdef CONFIG_PAX_PER_CPU_PGD
34075+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
34076+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
34077+ KERNEL_PGD_PTRS);
34078+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
34079+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
34080+ KERNEL_PGD_PTRS);
34081+ load_cr3(get_cpu_pgd(0, kernel));
34082+#else
34083 load_cr3(swapper_pg_dir);
34084+#endif
34085+
34086 __flush_tlb_all();
34087
34088 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
34089@@ -586,10 +600,40 @@ void __init init_mem_mapping(void)
34090 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
34091 * mmio resources as well as potential bios/acpi data regions.
34092 */
34093+
34094+#ifdef CONFIG_GRKERNSEC_KMEM
34095+static unsigned int ebda_start __read_only;
34096+static unsigned int ebda_end __read_only;
34097+#endif
34098+
34099 int devmem_is_allowed(unsigned long pagenr)
34100 {
34101- if (pagenr < 256)
34102+#ifdef CONFIG_GRKERNSEC_KMEM
34103+ /* allow BDA */
34104+ if (!pagenr)
34105 return 1;
34106+ /* allow EBDA */
34107+ if (pagenr >= ebda_start && pagenr < ebda_end)
34108+ return 1;
34109+ /* if tboot is in use, allow access to its hardcoded serial log range */
34110+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
34111+ return 1;
34112+#else
34113+ if (!pagenr)
34114+ return 1;
34115+#ifdef CONFIG_VM86
34116+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
34117+ return 1;
34118+#endif
34119+#endif
34120+
34121+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
34122+ return 1;
34123+#ifdef CONFIG_GRKERNSEC_KMEM
34124+ /* throw out everything else below 1MB */
34125+ if (pagenr <= 256)
34126+ return 0;
34127+#endif
34128 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
34129 return 0;
34130 if (!page_is_ram(pagenr))
34131@@ -635,8 +679,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
34132 #endif
34133 }
34134
34135+#ifdef CONFIG_GRKERNSEC_KMEM
34136+static inline void gr_init_ebda(void)
34137+{
34138+ unsigned int ebda_addr;
34139+ unsigned int ebda_size = 0;
34140+
34141+ ebda_addr = get_bios_ebda();
34142+ if (ebda_addr) {
34143+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
34144+ ebda_size <<= 10;
34145+ }
34146+ if (ebda_addr && ebda_size) {
34147+ ebda_start = ebda_addr >> PAGE_SHIFT;
34148+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
34149+ } else {
34150+ ebda_start = 0x9f000 >> PAGE_SHIFT;
34151+ ebda_end = 0xa0000 >> PAGE_SHIFT;
34152+ }
34153+}
34154+#else
34155+static inline void gr_init_ebda(void) { }
34156+#endif
34157+
34158 void free_initmem(void)
34159 {
34160+#ifdef CONFIG_PAX_KERNEXEC
34161+#ifdef CONFIG_X86_32
34162+ /* PaX: limit KERNEL_CS to actual size */
34163+ unsigned long addr, limit;
34164+ struct desc_struct d;
34165+ int cpu;
34166+#else
34167+ pgd_t *pgd;
34168+ pud_t *pud;
34169+ pmd_t *pmd;
34170+ unsigned long addr, end;
34171+#endif
34172+#endif
34173+
34174+ gr_init_ebda();
34175+
34176+#ifdef CONFIG_PAX_KERNEXEC
34177+#ifdef CONFIG_X86_32
34178+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
34179+ limit = (limit - 1UL) >> PAGE_SHIFT;
34180+
34181+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
34182+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34183+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
34184+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
34185+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
34186+ }
34187+
34188+ /* PaX: make KERNEL_CS read-only */
34189+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
34190+ if (!paravirt_enabled())
34191+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
34192+/*
34193+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
34194+ pgd = pgd_offset_k(addr);
34195+ pud = pud_offset(pgd, addr);
34196+ pmd = pmd_offset(pud, addr);
34197+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34198+ }
34199+*/
34200+#ifdef CONFIG_X86_PAE
34201+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
34202+/*
34203+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
34204+ pgd = pgd_offset_k(addr);
34205+ pud = pud_offset(pgd, addr);
34206+ pmd = pmd_offset(pud, addr);
34207+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
34208+ }
34209+*/
34210+#endif
34211+
34212+#ifdef CONFIG_MODULES
34213+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
34214+#endif
34215+
34216+#else
34217+ /* PaX: make kernel code/rodata read-only, rest non-executable */
34218+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
34219+ pgd = pgd_offset_k(addr);
34220+ pud = pud_offset(pgd, addr);
34221+ pmd = pmd_offset(pud, addr);
34222+ if (!pmd_present(*pmd))
34223+ continue;
34224+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
34225+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34226+ else
34227+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
34228+ }
34229+
34230+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
34231+ end = addr + KERNEL_IMAGE_SIZE;
34232+ for (; addr < end; addr += PMD_SIZE) {
34233+ pgd = pgd_offset_k(addr);
34234+ pud = pud_offset(pgd, addr);
34235+ pmd = pmd_offset(pud, addr);
34236+ if (!pmd_present(*pmd))
34237+ continue;
34238+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
34239+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
34240+ }
34241+#endif
34242+
34243+ flush_tlb_all();
34244+#endif
34245+
34246 free_init_pages("unused kernel",
34247 (unsigned long)(&__init_begin),
34248 (unsigned long)(&__init_end));
34249diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
34250index 7d05565..bfc5338 100644
34251--- a/arch/x86/mm/init_32.c
34252+++ b/arch/x86/mm/init_32.c
34253@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
34254 bool __read_mostly __vmalloc_start_set = false;
34255
34256 /*
34257- * Creates a middle page table and puts a pointer to it in the
34258- * given global directory entry. This only returns the gd entry
34259- * in non-PAE compilation mode, since the middle layer is folded.
34260- */
34261-static pmd_t * __init one_md_table_init(pgd_t *pgd)
34262-{
34263- pud_t *pud;
34264- pmd_t *pmd_table;
34265-
34266-#ifdef CONFIG_X86_PAE
34267- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
34268- pmd_table = (pmd_t *)alloc_low_page();
34269- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
34270- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
34271- pud = pud_offset(pgd, 0);
34272- BUG_ON(pmd_table != pmd_offset(pud, 0));
34273-
34274- return pmd_table;
34275- }
34276-#endif
34277- pud = pud_offset(pgd, 0);
34278- pmd_table = pmd_offset(pud, 0);
34279-
34280- return pmd_table;
34281-}
34282-
34283-/*
34284 * Create a page table and place a pointer to it in a middle page
34285 * directory entry:
34286 */
34287@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
34288 pte_t *page_table = (pte_t *)alloc_low_page();
34289
34290 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
34291+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34292+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
34293+#else
34294 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
34295+#endif
34296 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
34297 }
34298
34299 return pte_offset_kernel(pmd, 0);
34300 }
34301
34302+static pmd_t * __init one_md_table_init(pgd_t *pgd)
34303+{
34304+ pud_t *pud;
34305+ pmd_t *pmd_table;
34306+
34307+ pud = pud_offset(pgd, 0);
34308+ pmd_table = pmd_offset(pud, 0);
34309+
34310+ return pmd_table;
34311+}
34312+
34313 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
34314 {
34315 int pgd_idx = pgd_index(vaddr);
34316@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34317 int pgd_idx, pmd_idx;
34318 unsigned long vaddr;
34319 pgd_t *pgd;
34320+ pud_t *pud;
34321 pmd_t *pmd;
34322 pte_t *pte = NULL;
34323 unsigned long count = page_table_range_init_count(start, end);
34324@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34325 pgd = pgd_base + pgd_idx;
34326
34327 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
34328- pmd = one_md_table_init(pgd);
34329- pmd = pmd + pmd_index(vaddr);
34330+ pud = pud_offset(pgd, vaddr);
34331+ pmd = pmd_offset(pud, vaddr);
34332+
34333+#ifdef CONFIG_X86_PAE
34334+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
34335+#endif
34336+
34337 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
34338 pmd++, pmd_idx++) {
34339 pte = page_table_kmap_check(one_page_table_init(pmd),
34340@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
34341 }
34342 }
34343
34344-static inline int is_kernel_text(unsigned long addr)
34345+static inline int is_kernel_text(unsigned long start, unsigned long end)
34346 {
34347- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
34348- return 1;
34349- return 0;
34350+ if ((start >= ktla_ktva((unsigned long)_etext) ||
34351+ end <= ktla_ktva((unsigned long)_stext)) &&
34352+ (start >= ktla_ktva((unsigned long)_einittext) ||
34353+ end <= ktla_ktva((unsigned long)_sinittext)) &&
34354+
34355+#ifdef CONFIG_ACPI_SLEEP
34356+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
34357+#endif
34358+
34359+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
34360+ return 0;
34361+ return 1;
34362 }
34363
34364 /*
34365@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
34366 unsigned long last_map_addr = end;
34367 unsigned long start_pfn, end_pfn;
34368 pgd_t *pgd_base = swapper_pg_dir;
34369- int pgd_idx, pmd_idx, pte_ofs;
34370+ unsigned int pgd_idx, pmd_idx, pte_ofs;
34371 unsigned long pfn;
34372 pgd_t *pgd;
34373+ pud_t *pud;
34374 pmd_t *pmd;
34375 pte_t *pte;
34376 unsigned pages_2m, pages_4k;
34377@@ -291,8 +295,13 @@ repeat:
34378 pfn = start_pfn;
34379 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
34380 pgd = pgd_base + pgd_idx;
34381- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
34382- pmd = one_md_table_init(pgd);
34383+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
34384+ pud = pud_offset(pgd, 0);
34385+ pmd = pmd_offset(pud, 0);
34386+
34387+#ifdef CONFIG_X86_PAE
34388+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
34389+#endif
34390
34391 if (pfn >= end_pfn)
34392 continue;
34393@@ -304,14 +313,13 @@ repeat:
34394 #endif
34395 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
34396 pmd++, pmd_idx++) {
34397- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
34398+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
34399
34400 /*
34401 * Map with big pages if possible, otherwise
34402 * create normal page tables:
34403 */
34404 if (use_pse) {
34405- unsigned int addr2;
34406 pgprot_t prot = PAGE_KERNEL_LARGE;
34407 /*
34408 * first pass will use the same initial
34409@@ -322,11 +330,7 @@ repeat:
34410 _PAGE_PSE);
34411
34412 pfn &= PMD_MASK >> PAGE_SHIFT;
34413- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
34414- PAGE_OFFSET + PAGE_SIZE-1;
34415-
34416- if (is_kernel_text(addr) ||
34417- is_kernel_text(addr2))
34418+ if (is_kernel_text(address, address + PMD_SIZE))
34419 prot = PAGE_KERNEL_LARGE_EXEC;
34420
34421 pages_2m++;
34422@@ -343,7 +347,7 @@ repeat:
34423 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
34424 pte += pte_ofs;
34425 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
34426- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
34427+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
34428 pgprot_t prot = PAGE_KERNEL;
34429 /*
34430 * first pass will use the same initial
34431@@ -351,7 +355,7 @@ repeat:
34432 */
34433 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
34434
34435- if (is_kernel_text(addr))
34436+ if (is_kernel_text(address, address + PAGE_SIZE))
34437 prot = PAGE_KERNEL_EXEC;
34438
34439 pages_4k++;
34440@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
34441
34442 pud = pud_offset(pgd, va);
34443 pmd = pmd_offset(pud, va);
34444- if (!pmd_present(*pmd))
34445+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
34446 break;
34447
34448 /* should not be large page here */
34449@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
34450
34451 static void __init pagetable_init(void)
34452 {
34453- pgd_t *pgd_base = swapper_pg_dir;
34454-
34455- permanent_kmaps_init(pgd_base);
34456+ permanent_kmaps_init(swapper_pg_dir);
34457 }
34458
34459-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
34460+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
34461 EXPORT_SYMBOL_GPL(__supported_pte_mask);
34462
34463 /* user-defined highmem size */
34464@@ -787,10 +789,10 @@ void __init mem_init(void)
34465 ((unsigned long)&__init_end -
34466 (unsigned long)&__init_begin) >> 10,
34467
34468- (unsigned long)&_etext, (unsigned long)&_edata,
34469- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
34470+ (unsigned long)&_sdata, (unsigned long)&_edata,
34471+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
34472
34473- (unsigned long)&_text, (unsigned long)&_etext,
34474+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
34475 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
34476
34477 /*
34478@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
34479 if (!kernel_set_to_readonly)
34480 return;
34481
34482+ start = ktla_ktva(start);
34483 pr_debug("Set kernel text: %lx - %lx for read write\n",
34484 start, start+size);
34485
34486@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
34487 if (!kernel_set_to_readonly)
34488 return;
34489
34490+ start = ktla_ktva(start);
34491 pr_debug("Set kernel text: %lx - %lx for read only\n",
34492 start, start+size);
34493
34494@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
34495 unsigned long start = PFN_ALIGN(_text);
34496 unsigned long size = PFN_ALIGN(_etext) - start;
34497
34498+ start = ktla_ktva(start);
34499 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
34500 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
34501 size >> 10);
34502diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
34503index 5621c47..5e17b7390 100644
34504--- a/arch/x86/mm/init_64.c
34505+++ b/arch/x86/mm/init_64.c
34506@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
34507 * around without checking the pgd every time.
34508 */
34509
34510-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
34511+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
34512 EXPORT_SYMBOL_GPL(__supported_pte_mask);
34513
34514 int force_personality32;
34515@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34516
34517 for (address = start; address <= end; address += PGDIR_SIZE) {
34518 const pgd_t *pgd_ref = pgd_offset_k(address);
34519+
34520+#ifdef CONFIG_PAX_PER_CPU_PGD
34521+ unsigned long cpu;
34522+#else
34523 struct page *page;
34524+#endif
34525
34526 if (pgd_none(*pgd_ref))
34527 continue;
34528
34529 spin_lock(&pgd_lock);
34530+
34531+#ifdef CONFIG_PAX_PER_CPU_PGD
34532+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34533+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
34534+
34535+ if (pgd_none(*pgd))
34536+ set_pgd(pgd, *pgd_ref);
34537+ else
34538+ BUG_ON(pgd_page_vaddr(*pgd)
34539+ != pgd_page_vaddr(*pgd_ref));
34540+ pgd = pgd_offset_cpu(cpu, kernel, address);
34541+#else
34542 list_for_each_entry(page, &pgd_list, lru) {
34543 pgd_t *pgd;
34544 spinlock_t *pgt_lock;
34545@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34546 /* the pgt_lock only for Xen */
34547 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
34548 spin_lock(pgt_lock);
34549+#endif
34550
34551 if (pgd_none(*pgd))
34552 set_pgd(pgd, *pgd_ref);
34553@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
34554 BUG_ON(pgd_page_vaddr(*pgd)
34555 != pgd_page_vaddr(*pgd_ref));
34556
34557+#ifndef CONFIG_PAX_PER_CPU_PGD
34558 spin_unlock(pgt_lock);
34559+#endif
34560+
34561 }
34562 spin_unlock(&pgd_lock);
34563 }
34564@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
34565 {
34566 if (pgd_none(*pgd)) {
34567 pud_t *pud = (pud_t *)spp_getpage();
34568- pgd_populate(&init_mm, pgd, pud);
34569+ pgd_populate_kernel(&init_mm, pgd, pud);
34570 if (pud != pud_offset(pgd, 0))
34571 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
34572 pud, pud_offset(pgd, 0));
34573@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
34574 {
34575 if (pud_none(*pud)) {
34576 pmd_t *pmd = (pmd_t *) spp_getpage();
34577- pud_populate(&init_mm, pud, pmd);
34578+ pud_populate_kernel(&init_mm, pud, pmd);
34579 if (pmd != pmd_offset(pud, 0))
34580 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
34581 pmd, pmd_offset(pud, 0));
34582@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
34583 pmd = fill_pmd(pud, vaddr);
34584 pte = fill_pte(pmd, vaddr);
34585
34586+ pax_open_kernel();
34587 set_pte(pte, new_pte);
34588+ pax_close_kernel();
34589
34590 /*
34591 * It's enough to flush this one mapping.
34592@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
34593 pgd = pgd_offset_k((unsigned long)__va(phys));
34594 if (pgd_none(*pgd)) {
34595 pud = (pud_t *) spp_getpage();
34596- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
34597- _PAGE_USER));
34598+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
34599 }
34600 pud = pud_offset(pgd, (unsigned long)__va(phys));
34601 if (pud_none(*pud)) {
34602 pmd = (pmd_t *) spp_getpage();
34603- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
34604- _PAGE_USER));
34605+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
34606 }
34607 pmd = pmd_offset(pud, phys);
34608 BUG_ON(!pmd_none(*pmd));
34609@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
34610 prot);
34611
34612 spin_lock(&init_mm.page_table_lock);
34613- pud_populate(&init_mm, pud, pmd);
34614+ pud_populate_kernel(&init_mm, pud, pmd);
34615 spin_unlock(&init_mm.page_table_lock);
34616 }
34617 __flush_tlb_all();
34618@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
34619 page_size_mask);
34620
34621 spin_lock(&init_mm.page_table_lock);
34622- pgd_populate(&init_mm, pgd, pud);
34623+ pgd_populate_kernel(&init_mm, pgd, pud);
34624 spin_unlock(&init_mm.page_table_lock);
34625 pgd_changed = true;
34626 }
34627@@ -1196,8 +1217,8 @@ static struct vm_operations_struct gate_vma_ops = {
34628 static struct vm_area_struct gate_vma = {
34629 .vm_start = VSYSCALL_ADDR,
34630 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
34631- .vm_page_prot = PAGE_READONLY_EXEC,
34632- .vm_flags = VM_READ | VM_EXEC,
34633+ .vm_page_prot = PAGE_READONLY,
34634+ .vm_flags = VM_READ,
34635 .vm_ops = &gate_vma_ops,
34636 };
34637
34638diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
34639index 7b179b49..6bd17777 100644
34640--- a/arch/x86/mm/iomap_32.c
34641+++ b/arch/x86/mm/iomap_32.c
34642@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
34643 type = kmap_atomic_idx_push();
34644 idx = type + KM_TYPE_NR * smp_processor_id();
34645 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
34646+
34647+ pax_open_kernel();
34648 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
34649+ pax_close_kernel();
34650+
34651 arch_flush_lazy_mmu_mode();
34652
34653 return (void *)vaddr;
34654diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
34655index baff1da..2816ef4 100644
34656--- a/arch/x86/mm/ioremap.c
34657+++ b/arch/x86/mm/ioremap.c
34658@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
34659 unsigned long i;
34660
34661 for (i = 0; i < nr_pages; ++i)
34662- if (pfn_valid(start_pfn + i) &&
34663- !PageReserved(pfn_to_page(start_pfn + i)))
34664+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
34665+ !PageReserved(pfn_to_page(start_pfn + i))))
34666 return 1;
34667
34668 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
34669@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
34670 *
34671 * Caller must ensure there is only one unmapping for the same pointer.
34672 */
34673-void iounmap(volatile void __iomem *addr)
34674+void iounmap(const volatile void __iomem *addr)
34675 {
34676 struct vm_struct *p, *o;
34677
34678@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
34679
34680 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
34681 if (page_is_ram(start >> PAGE_SHIFT))
34682+#ifdef CONFIG_HIGHMEM
34683+ if ((start >> PAGE_SHIFT) < max_low_pfn)
34684+#endif
34685 return __va(phys);
34686
34687 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
34688@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
34689 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
34690 {
34691 if (page_is_ram(phys >> PAGE_SHIFT))
34692+#ifdef CONFIG_HIGHMEM
34693+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
34694+#endif
34695 return;
34696
34697 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
34698 return;
34699 }
34700
34701-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
34702+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
34703
34704 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
34705 {
34706@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
34707 early_ioremap_setup();
34708
34709 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
34710- memset(bm_pte, 0, sizeof(bm_pte));
34711- pmd_populate_kernel(&init_mm, pmd, bm_pte);
34712+ pmd_populate_user(&init_mm, pmd, bm_pte);
34713
34714 /*
34715 * The boot-ioremap range spans multiple pmds, for which
34716diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
34717index dd89a13..d77bdcc 100644
34718--- a/arch/x86/mm/kmemcheck/kmemcheck.c
34719+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
34720@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
34721 * memory (e.g. tracked pages)? For now, we need this to avoid
34722 * invoking kmemcheck for PnP BIOS calls.
34723 */
34724- if (regs->flags & X86_VM_MASK)
34725+ if (v8086_mode(regs))
34726 return false;
34727- if (regs->cs != __KERNEL_CS)
34728+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
34729 return false;
34730
34731 pte = kmemcheck_pte_lookup(address);
34732diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
34733index 919b912..9267313 100644
34734--- a/arch/x86/mm/mmap.c
34735+++ b/arch/x86/mm/mmap.c
34736@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
34737 * Leave an at least ~128 MB hole with possible stack randomization.
34738 */
34739 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
34740-#define MAX_GAP (TASK_SIZE/6*5)
34741+#define MAX_GAP (pax_task_size/6*5)
34742
34743 static int mmap_is_legacy(void)
34744 {
34745@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
34746 return rnd << PAGE_SHIFT;
34747 }
34748
34749-static unsigned long mmap_base(void)
34750+static unsigned long mmap_base(struct mm_struct *mm)
34751 {
34752 unsigned long gap = rlimit(RLIMIT_STACK);
34753+ unsigned long pax_task_size = TASK_SIZE;
34754+
34755+#ifdef CONFIG_PAX_SEGMEXEC
34756+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34757+ pax_task_size = SEGMEXEC_TASK_SIZE;
34758+#endif
34759
34760 if (gap < MIN_GAP)
34761 gap = MIN_GAP;
34762 else if (gap > MAX_GAP)
34763 gap = MAX_GAP;
34764
34765- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
34766+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
34767 }
34768
34769 /*
34770 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
34771 * does, but not when emulating X86_32
34772 */
34773-static unsigned long mmap_legacy_base(void)
34774+static unsigned long mmap_legacy_base(struct mm_struct *mm)
34775 {
34776- if (mmap_is_ia32())
34777+ if (mmap_is_ia32()) {
34778+
34779+#ifdef CONFIG_PAX_SEGMEXEC
34780+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34781+ return SEGMEXEC_TASK_UNMAPPED_BASE;
34782+ else
34783+#endif
34784+
34785 return TASK_UNMAPPED_BASE;
34786- else
34787+ } else
34788 return TASK_UNMAPPED_BASE + mmap_rnd();
34789 }
34790
34791@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
34792 */
34793 void arch_pick_mmap_layout(struct mm_struct *mm)
34794 {
34795- mm->mmap_legacy_base = mmap_legacy_base();
34796- mm->mmap_base = mmap_base();
34797+ mm->mmap_legacy_base = mmap_legacy_base(mm);
34798+ mm->mmap_base = mmap_base(mm);
34799+
34800+#ifdef CONFIG_PAX_RANDMMAP
34801+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
34802+ mm->mmap_legacy_base += mm->delta_mmap;
34803+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
34804+ }
34805+#endif
34806
34807 if (mmap_is_legacy()) {
34808 mm->mmap_base = mm->mmap_legacy_base;
34809diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
34810index 0057a7a..95c7edd 100644
34811--- a/arch/x86/mm/mmio-mod.c
34812+++ b/arch/x86/mm/mmio-mod.c
34813@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
34814 break;
34815 default:
34816 {
34817- unsigned char *ip = (unsigned char *)instptr;
34818+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
34819 my_trace->opcode = MMIO_UNKNOWN_OP;
34820 my_trace->width = 0;
34821 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
34822@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
34823 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34824 void __iomem *addr)
34825 {
34826- static atomic_t next_id;
34827+ static atomic_unchecked_t next_id;
34828 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
34829 /* These are page-unaligned. */
34830 struct mmiotrace_map map = {
34831@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34832 .private = trace
34833 },
34834 .phys = offset,
34835- .id = atomic_inc_return(&next_id)
34836+ .id = atomic_inc_return_unchecked(&next_id)
34837 };
34838 map.map_id = trace->id;
34839
34840@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
34841 ioremap_trace_core(offset, size, addr);
34842 }
34843
34844-static void iounmap_trace_core(volatile void __iomem *addr)
34845+static void iounmap_trace_core(const volatile void __iomem *addr)
34846 {
34847 struct mmiotrace_map map = {
34848 .phys = 0,
34849@@ -328,7 +328,7 @@ not_enabled:
34850 }
34851 }
34852
34853-void mmiotrace_iounmap(volatile void __iomem *addr)
34854+void mmiotrace_iounmap(const volatile void __iomem *addr)
34855 {
34856 might_sleep();
34857 if (is_enabled()) /* recheck and proper locking in *_core() */
34858diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
34859index a32b706..efb308b 100644
34860--- a/arch/x86/mm/numa.c
34861+++ b/arch/x86/mm/numa.c
34862@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
34863 return true;
34864 }
34865
34866-static int __init numa_register_memblks(struct numa_meminfo *mi)
34867+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
34868 {
34869 unsigned long uninitialized_var(pfn_align);
34870 int i, nid;
34871diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
34872index ae242a7..1c7998f 100644
34873--- a/arch/x86/mm/pageattr.c
34874+++ b/arch/x86/mm/pageattr.c
34875@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34876 */
34877 #ifdef CONFIG_PCI_BIOS
34878 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
34879- pgprot_val(forbidden) |= _PAGE_NX;
34880+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34881 #endif
34882
34883 /*
34884@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34885 * Does not cover __inittext since that is gone later on. On
34886 * 64bit we do not enforce !NX on the low mapping
34887 */
34888- if (within(address, (unsigned long)_text, (unsigned long)_etext))
34889- pgprot_val(forbidden) |= _PAGE_NX;
34890+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
34891+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34892
34893+#ifdef CONFIG_DEBUG_RODATA
34894 /*
34895 * The .rodata section needs to be read-only. Using the pfn
34896 * catches all aliases.
34897@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34898 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
34899 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
34900 pgprot_val(forbidden) |= _PAGE_RW;
34901+#endif
34902
34903 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
34904 /*
34905@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34906 }
34907 #endif
34908
34909+#ifdef CONFIG_PAX_KERNEXEC
34910+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
34911+ pgprot_val(forbidden) |= _PAGE_RW;
34912+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34913+ }
34914+#endif
34915+
34916 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
34917
34918 return prot;
34919@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
34920 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
34921 {
34922 /* change init_mm */
34923+ pax_open_kernel();
34924 set_pte_atomic(kpte, pte);
34925+
34926 #ifdef CONFIG_X86_32
34927 if (!SHARED_KERNEL_PMD) {
34928+
34929+#ifdef CONFIG_PAX_PER_CPU_PGD
34930+ unsigned long cpu;
34931+#else
34932 struct page *page;
34933+#endif
34934
34935+#ifdef CONFIG_PAX_PER_CPU_PGD
34936+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34937+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
34938+#else
34939 list_for_each_entry(page, &pgd_list, lru) {
34940- pgd_t *pgd;
34941+ pgd_t *pgd = (pgd_t *)page_address(page);
34942+#endif
34943+
34944 pud_t *pud;
34945 pmd_t *pmd;
34946
34947- pgd = (pgd_t *)page_address(page) + pgd_index(address);
34948+ pgd += pgd_index(address);
34949 pud = pud_offset(pgd, address);
34950 pmd = pmd_offset(pud, address);
34951 set_pte_atomic((pte_t *)pmd, pte);
34952 }
34953 }
34954 #endif
34955+ pax_close_kernel();
34956 }
34957
34958 static int
34959diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
34960index 6574388..87e9bef 100644
34961--- a/arch/x86/mm/pat.c
34962+++ b/arch/x86/mm/pat.c
34963@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
34964
34965 if (!entry) {
34966 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
34967- current->comm, current->pid, start, end - 1);
34968+ current->comm, task_pid_nr(current), start, end - 1);
34969 return -EINVAL;
34970 }
34971
34972@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34973
34974 while (cursor < to) {
34975 if (!devmem_is_allowed(pfn)) {
34976- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
34977- current->comm, from, to - 1);
34978+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
34979+ current->comm, from, to - 1, cursor);
34980 return 0;
34981 }
34982 cursor += PAGE_SIZE;
34983@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
34984 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
34985 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
34986 "for [mem %#010Lx-%#010Lx]\n",
34987- current->comm, current->pid,
34988+ current->comm, task_pid_nr(current),
34989 cattr_name(flags),
34990 base, (unsigned long long)(base + size-1));
34991 return -EINVAL;
34992@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34993 flags = lookup_memtype(paddr);
34994 if (want_flags != flags) {
34995 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
34996- current->comm, current->pid,
34997+ current->comm, task_pid_nr(current),
34998 cattr_name(want_flags),
34999 (unsigned long long)paddr,
35000 (unsigned long long)(paddr + size - 1),
35001@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
35002 free_memtype(paddr, paddr + size);
35003 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
35004 " for [mem %#010Lx-%#010Lx], got %s\n",
35005- current->comm, current->pid,
35006+ current->comm, task_pid_nr(current),
35007 cattr_name(want_flags),
35008 (unsigned long long)paddr,
35009 (unsigned long long)(paddr + size - 1),
35010diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
35011index 415f6c4..d319983 100644
35012--- a/arch/x86/mm/pat_rbtree.c
35013+++ b/arch/x86/mm/pat_rbtree.c
35014@@ -160,7 +160,7 @@ success:
35015
35016 failure:
35017 printk(KERN_INFO "%s:%d conflicting memory types "
35018- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
35019+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
35020 end, cattr_name(found_type), cattr_name(match->type));
35021 return -EBUSY;
35022 }
35023diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
35024index 9f0614d..92ae64a 100644
35025--- a/arch/x86/mm/pf_in.c
35026+++ b/arch/x86/mm/pf_in.c
35027@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
35028 int i;
35029 enum reason_type rv = OTHERS;
35030
35031- p = (unsigned char *)ins_addr;
35032+ p = (unsigned char *)ktla_ktva(ins_addr);
35033 p += skip_prefix(p, &prf);
35034 p += get_opcode(p, &opcode);
35035
35036@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
35037 struct prefix_bits prf;
35038 int i;
35039
35040- p = (unsigned char *)ins_addr;
35041+ p = (unsigned char *)ktla_ktva(ins_addr);
35042 p += skip_prefix(p, &prf);
35043 p += get_opcode(p, &opcode);
35044
35045@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
35046 struct prefix_bits prf;
35047 int i;
35048
35049- p = (unsigned char *)ins_addr;
35050+ p = (unsigned char *)ktla_ktva(ins_addr);
35051 p += skip_prefix(p, &prf);
35052 p += get_opcode(p, &opcode);
35053
35054@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
35055 struct prefix_bits prf;
35056 int i;
35057
35058- p = (unsigned char *)ins_addr;
35059+ p = (unsigned char *)ktla_ktva(ins_addr);
35060 p += skip_prefix(p, &prf);
35061 p += get_opcode(p, &opcode);
35062 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
35063@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
35064 struct prefix_bits prf;
35065 int i;
35066
35067- p = (unsigned char *)ins_addr;
35068+ p = (unsigned char *)ktla_ktva(ins_addr);
35069 p += skip_prefix(p, &prf);
35070 p += get_opcode(p, &opcode);
35071 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
35072diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
35073index 6fb6927..4fc13c0 100644
35074--- a/arch/x86/mm/pgtable.c
35075+++ b/arch/x86/mm/pgtable.c
35076@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
35077 list_del(&page->lru);
35078 }
35079
35080-#define UNSHARED_PTRS_PER_PGD \
35081- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
35082+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
35083+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
35084
35085+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
35086+{
35087+ unsigned int count = USER_PGD_PTRS;
35088
35089+ if (!pax_user_shadow_base)
35090+ return;
35091+
35092+ while (count--)
35093+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
35094+}
35095+#endif
35096+
35097+#ifdef CONFIG_PAX_PER_CPU_PGD
35098+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
35099+{
35100+ unsigned int count = USER_PGD_PTRS;
35101+
35102+ while (count--) {
35103+ pgd_t pgd;
35104+
35105+#ifdef CONFIG_X86_64
35106+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
35107+#else
35108+ pgd = *src++;
35109+#endif
35110+
35111+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
35112+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
35113+#endif
35114+
35115+ *dst++ = pgd;
35116+ }
35117+
35118+}
35119+#endif
35120+
35121+#ifdef CONFIG_X86_64
35122+#define pxd_t pud_t
35123+#define pyd_t pgd_t
35124+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
35125+#define pgtable_pxd_page_ctor(page) true
35126+#define pgtable_pxd_page_dtor(page)
35127+#define pxd_free(mm, pud) pud_free((mm), (pud))
35128+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
35129+#define pyd_offset(mm, address) pgd_offset((mm), (address))
35130+#define PYD_SIZE PGDIR_SIZE
35131+#else
35132+#define pxd_t pmd_t
35133+#define pyd_t pud_t
35134+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
35135+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
35136+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
35137+#define pxd_free(mm, pud) pmd_free((mm), (pud))
35138+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
35139+#define pyd_offset(mm, address) pud_offset((mm), (address))
35140+#define PYD_SIZE PUD_SIZE
35141+#endif
35142+
35143+#ifdef CONFIG_PAX_PER_CPU_PGD
35144+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
35145+static inline void pgd_dtor(pgd_t *pgd) {}
35146+#else
35147 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
35148 {
35149 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
35150@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
35151 pgd_list_del(pgd);
35152 spin_unlock(&pgd_lock);
35153 }
35154+#endif
35155
35156 /*
35157 * List of all pgd's needed for non-PAE so it can invalidate entries
35158@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
35159 * -- nyc
35160 */
35161
35162-#ifdef CONFIG_X86_PAE
35163+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
35164 /*
35165 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
35166 * updating the top-level pagetable entries to guarantee the
35167@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
35168 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
35169 * and initialize the kernel pmds here.
35170 */
35171-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
35172+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
35173
35174 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
35175 {
35176@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
35177 */
35178 flush_tlb_mm(mm);
35179 }
35180+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
35181+#define PREALLOCATED_PXDS USER_PGD_PTRS
35182 #else /* !CONFIG_X86_PAE */
35183
35184 /* No need to prepopulate any pagetable entries in non-PAE modes. */
35185-#define PREALLOCATED_PMDS 0
35186+#define PREALLOCATED_PXDS 0
35187
35188 #endif /* CONFIG_X86_PAE */
35189
35190-static void free_pmds(pmd_t *pmds[])
35191+static void free_pxds(pxd_t *pxds[])
35192 {
35193 int i;
35194
35195- for(i = 0; i < PREALLOCATED_PMDS; i++)
35196- if (pmds[i]) {
35197- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
35198- free_page((unsigned long)pmds[i]);
35199+ for(i = 0; i < PREALLOCATED_PXDS; i++)
35200+ if (pxds[i]) {
35201+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
35202+ free_page((unsigned long)pxds[i]);
35203 }
35204 }
35205
35206-static int preallocate_pmds(pmd_t *pmds[])
35207+static int preallocate_pxds(pxd_t *pxds[])
35208 {
35209 int i;
35210 bool failed = false;
35211
35212- for(i = 0; i < PREALLOCATED_PMDS; i++) {
35213- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
35214- if (!pmd)
35215+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
35216+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
35217+ if (!pxd)
35218 failed = true;
35219- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
35220- free_page((unsigned long)pmd);
35221- pmd = NULL;
35222+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
35223+ free_page((unsigned long)pxd);
35224+ pxd = NULL;
35225 failed = true;
35226 }
35227- pmds[i] = pmd;
35228+ pxds[i] = pxd;
35229 }
35230
35231 if (failed) {
35232- free_pmds(pmds);
35233+ free_pxds(pxds);
35234 return -ENOMEM;
35235 }
35236
35237@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
35238 * preallocate which never got a corresponding vma will need to be
35239 * freed manually.
35240 */
35241-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
35242+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
35243 {
35244 int i;
35245
35246- for(i = 0; i < PREALLOCATED_PMDS; i++) {
35247+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
35248 pgd_t pgd = pgdp[i];
35249
35250 if (pgd_val(pgd) != 0) {
35251- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
35252+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
35253
35254- pgdp[i] = native_make_pgd(0);
35255+ set_pgd(pgdp + i, native_make_pgd(0));
35256
35257- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
35258- pmd_free(mm, pmd);
35259+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
35260+ pxd_free(mm, pxd);
35261 }
35262 }
35263 }
35264
35265-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
35266+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
35267 {
35268- pud_t *pud;
35269+ pyd_t *pyd;
35270 int i;
35271
35272- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
35273+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
35274 return;
35275
35276- pud = pud_offset(pgd, 0);
35277-
35278- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
35279- pmd_t *pmd = pmds[i];
35280+#ifdef CONFIG_X86_64
35281+ pyd = pyd_offset(mm, 0L);
35282+#else
35283+ pyd = pyd_offset(pgd, 0L);
35284+#endif
35285
35286+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
35287+ pxd_t *pxd = pxds[i];
35288 if (i >= KERNEL_PGD_BOUNDARY)
35289- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
35290- sizeof(pmd_t) * PTRS_PER_PMD);
35291+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
35292+ sizeof(pxd_t) * PTRS_PER_PMD);
35293
35294- pud_populate(mm, pud, pmd);
35295+ pyd_populate(mm, pyd, pxd);
35296 }
35297 }
35298
35299 pgd_t *pgd_alloc(struct mm_struct *mm)
35300 {
35301 pgd_t *pgd;
35302- pmd_t *pmds[PREALLOCATED_PMDS];
35303+ pxd_t *pxds[PREALLOCATED_PXDS];
35304
35305 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
35306
35307@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
35308
35309 mm->pgd = pgd;
35310
35311- if (preallocate_pmds(pmds) != 0)
35312+ if (preallocate_pxds(pxds) != 0)
35313 goto out_free_pgd;
35314
35315 if (paravirt_pgd_alloc(mm) != 0)
35316- goto out_free_pmds;
35317+ goto out_free_pxds;
35318
35319 /*
35320 * Make sure that pre-populating the pmds is atomic with
35321@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
35322 spin_lock(&pgd_lock);
35323
35324 pgd_ctor(mm, pgd);
35325- pgd_prepopulate_pmd(mm, pgd, pmds);
35326+ pgd_prepopulate_pxd(mm, pgd, pxds);
35327
35328 spin_unlock(&pgd_lock);
35329
35330 return pgd;
35331
35332-out_free_pmds:
35333- free_pmds(pmds);
35334+out_free_pxds:
35335+ free_pxds(pxds);
35336 out_free_pgd:
35337 free_page((unsigned long)pgd);
35338 out:
35339@@ -313,7 +380,7 @@ out:
35340
35341 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
35342 {
35343- pgd_mop_up_pmds(mm, pgd);
35344+ pgd_mop_up_pxds(mm, pgd);
35345 pgd_dtor(pgd);
35346 paravirt_pgd_free(mm, pgd);
35347 free_page((unsigned long)pgd);
35348diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
35349index 4dd8cf6..f9d143e 100644
35350--- a/arch/x86/mm/pgtable_32.c
35351+++ b/arch/x86/mm/pgtable_32.c
35352@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
35353 return;
35354 }
35355 pte = pte_offset_kernel(pmd, vaddr);
35356+
35357+ pax_open_kernel();
35358 if (pte_val(pteval))
35359 set_pte_at(&init_mm, vaddr, pte, pteval);
35360 else
35361 pte_clear(&init_mm, vaddr, pte);
35362+ pax_close_kernel();
35363
35364 /*
35365 * It's enough to flush this one mapping.
35366diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
35367index e666cbb..61788c45 100644
35368--- a/arch/x86/mm/physaddr.c
35369+++ b/arch/x86/mm/physaddr.c
35370@@ -10,7 +10,7 @@
35371 #ifdef CONFIG_X86_64
35372
35373 #ifdef CONFIG_DEBUG_VIRTUAL
35374-unsigned long __phys_addr(unsigned long x)
35375+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
35376 {
35377 unsigned long y = x - __START_KERNEL_map;
35378
35379@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
35380 #else
35381
35382 #ifdef CONFIG_DEBUG_VIRTUAL
35383-unsigned long __phys_addr(unsigned long x)
35384+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
35385 {
35386 unsigned long phys_addr = x - PAGE_OFFSET;
35387 /* VMALLOC_* aren't constants */
35388diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
35389index 90555bf..f5f1828 100644
35390--- a/arch/x86/mm/setup_nx.c
35391+++ b/arch/x86/mm/setup_nx.c
35392@@ -5,8 +5,10 @@
35393 #include <asm/pgtable.h>
35394 #include <asm/proto.h>
35395
35396+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35397 static int disable_nx;
35398
35399+#ifndef CONFIG_PAX_PAGEEXEC
35400 /*
35401 * noexec = on|off
35402 *
35403@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
35404 return 0;
35405 }
35406 early_param("noexec", noexec_setup);
35407+#endif
35408+
35409+#endif
35410
35411 void x86_configure_nx(void)
35412 {
35413+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35414 if (cpu_has_nx && !disable_nx)
35415 __supported_pte_mask |= _PAGE_NX;
35416 else
35417+#endif
35418 __supported_pte_mask &= ~_PAGE_NX;
35419 }
35420
35421diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
35422index ee61c36..e6fedeb 100644
35423--- a/arch/x86/mm/tlb.c
35424+++ b/arch/x86/mm/tlb.c
35425@@ -48,7 +48,11 @@ void leave_mm(int cpu)
35426 BUG();
35427 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
35428 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
35429+
35430+#ifndef CONFIG_PAX_PER_CPU_PGD
35431 load_cr3(swapper_pg_dir);
35432+#endif
35433+
35434 /*
35435 * This gets called in the idle path where RCU
35436 * functions differently. Tracing normally
35437diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
35438new file mode 100644
35439index 0000000..dace51c
35440--- /dev/null
35441+++ b/arch/x86/mm/uderef_64.c
35442@@ -0,0 +1,37 @@
35443+#include <linux/mm.h>
35444+#include <asm/pgtable.h>
35445+#include <asm/uaccess.h>
35446+
35447+#ifdef CONFIG_PAX_MEMORY_UDEREF
35448+/* PaX: due to the special call convention these functions must
35449+ * - remain leaf functions under all configurations,
35450+ * - never be called directly, only dereferenced from the wrappers.
35451+ */
35452+void __pax_open_userland(void)
35453+{
35454+ unsigned int cpu;
35455+
35456+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
35457+ return;
35458+
35459+ cpu = raw_get_cpu();
35460+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
35461+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
35462+ raw_put_cpu_no_resched();
35463+}
35464+EXPORT_SYMBOL(__pax_open_userland);
35465+
35466+void __pax_close_userland(void)
35467+{
35468+ unsigned int cpu;
35469+
35470+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
35471+ return;
35472+
35473+ cpu = raw_get_cpu();
35474+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
35475+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
35476+ raw_put_cpu_no_resched();
35477+}
35478+EXPORT_SYMBOL(__pax_close_userland);
35479+#endif
35480diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
35481index 6440221..f84b5c7 100644
35482--- a/arch/x86/net/bpf_jit.S
35483+++ b/arch/x86/net/bpf_jit.S
35484@@ -9,6 +9,7 @@
35485 */
35486 #include <linux/linkage.h>
35487 #include <asm/dwarf2.h>
35488+#include <asm/alternative-asm.h>
35489
35490 /*
35491 * Calling convention :
35492@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
35493 jle bpf_slow_path_word
35494 mov (SKBDATA,%rsi),%eax
35495 bswap %eax /* ntohl() */
35496+ pax_force_retaddr
35497 ret
35498
35499 sk_load_half:
35500@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
35501 jle bpf_slow_path_half
35502 movzwl (SKBDATA,%rsi),%eax
35503 rol $8,%ax # ntohs()
35504+ pax_force_retaddr
35505 ret
35506
35507 sk_load_byte:
35508@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
35509 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
35510 jle bpf_slow_path_byte
35511 movzbl (SKBDATA,%rsi),%eax
35512+ pax_force_retaddr
35513 ret
35514
35515 /* rsi contains offset and can be scratched */
35516@@ -90,6 +94,7 @@ bpf_slow_path_word:
35517 js bpf_error
35518 mov - MAX_BPF_STACK + 32(%rbp),%eax
35519 bswap %eax
35520+ pax_force_retaddr
35521 ret
35522
35523 bpf_slow_path_half:
35524@@ -98,12 +103,14 @@ bpf_slow_path_half:
35525 mov - MAX_BPF_STACK + 32(%rbp),%ax
35526 rol $8,%ax
35527 movzwl %ax,%eax
35528+ pax_force_retaddr
35529 ret
35530
35531 bpf_slow_path_byte:
35532 bpf_slow_path_common(1)
35533 js bpf_error
35534 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
35535+ pax_force_retaddr
35536 ret
35537
35538 #define sk_negative_common(SIZE) \
35539@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
35540 sk_negative_common(4)
35541 mov (%rax), %eax
35542 bswap %eax
35543+ pax_force_retaddr
35544 ret
35545
35546 bpf_slow_path_half_neg:
35547@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
35548 mov (%rax),%ax
35549 rol $8,%ax
35550 movzwl %ax,%eax
35551+ pax_force_retaddr
35552 ret
35553
35554 bpf_slow_path_byte_neg:
35555@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
35556 .globl sk_load_byte_negative_offset
35557 sk_negative_common(1)
35558 movzbl (%rax), %eax
35559+ pax_force_retaddr
35560 ret
35561
35562 bpf_error:
35563@@ -156,4 +166,5 @@ bpf_error:
35564 mov - MAX_BPF_STACK + 16(%rbp),%r14
35565 mov - MAX_BPF_STACK + 24(%rbp),%r15
35566 leaveq
35567+ pax_force_retaddr
35568 ret
35569diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
35570index 5c8cb80..5fd7860 100644
35571--- a/arch/x86/net/bpf_jit_comp.c
35572+++ b/arch/x86/net/bpf_jit_comp.c
35573@@ -15,7 +15,11 @@
35574 #include <linux/if_vlan.h>
35575 #include <linux/random.h>
35576
35577+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
35578+int bpf_jit_enable __read_only;
35579+#else
35580 int bpf_jit_enable __read_mostly;
35581+#endif
35582
35583 /*
35584 * assembly code in arch/x86/net/bpf_jit.S
35585@@ -109,36 +113,32 @@ static inline void bpf_flush_icache(void *start, void *end)
35586 #define CHOOSE_LOAD_FUNC(K, func) \
35587 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
35588
35589-struct bpf_binary_header {
35590- unsigned int pages;
35591- /* Note : for security reasons, bpf code will follow a randomly
35592- * sized amount of int3 instructions
35593- */
35594- u8 image[];
35595-};
35596-
35597-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
35598+/* Note : for security reasons, bpf code will follow a randomly
35599+ * sized amount of int3 instructions
35600+ */
35601+static u8 *bpf_alloc_binary(unsigned int proglen,
35602 u8 **image_ptr)
35603 {
35604 unsigned int sz, hole;
35605- struct bpf_binary_header *header;
35606+ u8 *header;
35607
35608 /* Most of BPF filters are really small,
35609 * but if some of them fill a page, allow at least
35610 * 128 extra bytes to insert a random section of int3
35611 */
35612- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
35613- header = module_alloc(sz);
35614+ sz = round_up(proglen + 128, PAGE_SIZE);
35615+ header = module_alloc_exec(sz);
35616 if (!header)
35617 return NULL;
35618
35619+ pax_open_kernel();
35620 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
35621+ pax_close_kernel();
35622
35623- header->pages = sz / PAGE_SIZE;
35624- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
35625+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
35626
35627 /* insert a random number of int3 instructions before BPF code */
35628- *image_ptr = &header->image[prandom_u32() % hole];
35629+ *image_ptr = &header[prandom_u32() % hole];
35630 return header;
35631 }
35632
35633@@ -853,7 +853,9 @@ common_load: ctx->seen_ld_abs = true;
35634 pr_err("bpf_jit_compile fatal error\n");
35635 return -EFAULT;
35636 }
35637+ pax_open_kernel();
35638 memcpy(image + proglen, temp, ilen);
35639+ pax_close_kernel();
35640 }
35641 proglen += ilen;
35642 addrs[i] = proglen;
35643@@ -868,7 +870,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
35644
35645 void bpf_int_jit_compile(struct bpf_prog *prog)
35646 {
35647- struct bpf_binary_header *header = NULL;
35648+ u8 *header = NULL;
35649 int proglen, oldproglen = 0;
35650 struct jit_context ctx = {};
35651 u8 *image = NULL;
35652@@ -900,7 +902,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
35653 if (proglen <= 0) {
35654 image = NULL;
35655 if (header)
35656- module_free(NULL, header);
35657+ module_free_exec(NULL, image);
35658 goto out;
35659 }
35660 if (image) {
35661@@ -922,7 +924,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
35662
35663 if (image) {
35664 bpf_flush_icache(header, image + proglen);
35665- set_memory_ro((unsigned long)header, header->pages);
35666 prog->bpf_func = (void *)image;
35667 prog->jited = 1;
35668 }
35669@@ -930,23 +931,15 @@ out:
35670 kfree(addrs);
35671 }
35672
35673-static void bpf_jit_free_deferred(struct work_struct *work)
35674-{
35675- struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
35676- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
35677- struct bpf_binary_header *header = (void *)addr;
35678-
35679- set_memory_rw(addr, header->pages);
35680- module_free(NULL, header);
35681- kfree(fp);
35682-}
35683-
35684 void bpf_jit_free(struct bpf_prog *fp)
35685 {
35686- if (fp->jited) {
35687- INIT_WORK(&fp->work, bpf_jit_free_deferred);
35688- schedule_work(&fp->work);
35689- } else {
35690- kfree(fp);
35691- }
35692+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
35693+
35694+ if (!fp->jited)
35695+ goto free_filter;
35696+
35697+ module_free_exec(NULL, (void *)addr);
35698+
35699+free_filter:
35700+ bpf_prog_unlock_free(fp);
35701 }
35702diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
35703index 5d04be5..2beeaa2 100644
35704--- a/arch/x86/oprofile/backtrace.c
35705+++ b/arch/x86/oprofile/backtrace.c
35706@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
35707 struct stack_frame_ia32 *fp;
35708 unsigned long bytes;
35709
35710- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
35711+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
35712 if (bytes != 0)
35713 return NULL;
35714
35715- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
35716+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
35717
35718 oprofile_add_trace(bufhead[0].return_address);
35719
35720@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
35721 struct stack_frame bufhead[2];
35722 unsigned long bytes;
35723
35724- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
35725+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
35726 if (bytes != 0)
35727 return NULL;
35728
35729@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
35730 {
35731 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
35732
35733- if (!user_mode_vm(regs)) {
35734+ if (!user_mode(regs)) {
35735 unsigned long stack = kernel_stack_pointer(regs);
35736 if (depth)
35737 dump_trace(NULL, regs, (unsigned long *)stack, 0,
35738diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
35739index 379e8bd..6386e09 100644
35740--- a/arch/x86/oprofile/nmi_int.c
35741+++ b/arch/x86/oprofile/nmi_int.c
35742@@ -23,6 +23,7 @@
35743 #include <asm/nmi.h>
35744 #include <asm/msr.h>
35745 #include <asm/apic.h>
35746+#include <asm/pgtable.h>
35747
35748 #include "op_counter.h"
35749 #include "op_x86_model.h"
35750@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
35751 if (ret)
35752 return ret;
35753
35754- if (!model->num_virt_counters)
35755- model->num_virt_counters = model->num_counters;
35756+ if (!model->num_virt_counters) {
35757+ pax_open_kernel();
35758+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
35759+ pax_close_kernel();
35760+ }
35761
35762 mux_init(ops);
35763
35764diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
35765index 50d86c0..7985318 100644
35766--- a/arch/x86/oprofile/op_model_amd.c
35767+++ b/arch/x86/oprofile/op_model_amd.c
35768@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
35769 num_counters = AMD64_NUM_COUNTERS;
35770 }
35771
35772- op_amd_spec.num_counters = num_counters;
35773- op_amd_spec.num_controls = num_counters;
35774- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35775+ pax_open_kernel();
35776+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
35777+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
35778+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35779+ pax_close_kernel();
35780
35781 return 0;
35782 }
35783diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
35784index d90528e..0127e2b 100644
35785--- a/arch/x86/oprofile/op_model_ppro.c
35786+++ b/arch/x86/oprofile/op_model_ppro.c
35787@@ -19,6 +19,7 @@
35788 #include <asm/msr.h>
35789 #include <asm/apic.h>
35790 #include <asm/nmi.h>
35791+#include <asm/pgtable.h>
35792
35793 #include "op_x86_model.h"
35794 #include "op_counter.h"
35795@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
35796
35797 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
35798
35799- op_arch_perfmon_spec.num_counters = num_counters;
35800- op_arch_perfmon_spec.num_controls = num_counters;
35801+ pax_open_kernel();
35802+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
35803+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
35804+ pax_close_kernel();
35805 }
35806
35807 static int arch_perfmon_init(struct oprofile_operations *ignore)
35808diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
35809index 71e8a67..6a313bb 100644
35810--- a/arch/x86/oprofile/op_x86_model.h
35811+++ b/arch/x86/oprofile/op_x86_model.h
35812@@ -52,7 +52,7 @@ struct op_x86_model_spec {
35813 void (*switch_ctrl)(struct op_x86_model_spec const *model,
35814 struct op_msrs const * const msrs);
35815 #endif
35816-};
35817+} __do_const;
35818
35819 struct op_counter_config;
35820
35821diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
35822index b9958c3..24229ab 100644
35823--- a/arch/x86/pci/intel_mid_pci.c
35824+++ b/arch/x86/pci/intel_mid_pci.c
35825@@ -250,7 +250,7 @@ int __init intel_mid_pci_init(void)
35826 pci_mmcfg_late_init();
35827 pcibios_enable_irq = intel_mid_pci_irq_enable;
35828 pcibios_disable_irq = intel_mid_pci_irq_disable;
35829- pci_root_ops = intel_mid_pci_ops;
35830+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
35831 pci_soc_mode = 1;
35832 /* Continue with standard init */
35833 return 1;
35834diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
35835index eb500c2..eab9e70 100644
35836--- a/arch/x86/pci/irq.c
35837+++ b/arch/x86/pci/irq.c
35838@@ -51,7 +51,7 @@ struct irq_router {
35839 struct irq_router_handler {
35840 u16 vendor;
35841 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
35842-};
35843+} __do_const;
35844
35845 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
35846 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
35847@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
35848 return 0;
35849 }
35850
35851-static __initdata struct irq_router_handler pirq_routers[] = {
35852+static __initconst const struct irq_router_handler pirq_routers[] = {
35853 { PCI_VENDOR_ID_INTEL, intel_router_probe },
35854 { PCI_VENDOR_ID_AL, ali_router_probe },
35855 { PCI_VENDOR_ID_ITE, ite_router_probe },
35856@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
35857 static void __init pirq_find_router(struct irq_router *r)
35858 {
35859 struct irq_routing_table *rt = pirq_table;
35860- struct irq_router_handler *h;
35861+ const struct irq_router_handler *h;
35862
35863 #ifdef CONFIG_PCI_BIOS
35864 if (!rt->signature) {
35865@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
35866 return 0;
35867 }
35868
35869-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
35870+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
35871 {
35872 .callback = fix_broken_hp_bios_irq9,
35873 .ident = "HP Pavilion N5400 Series Laptop",
35874diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
35875index c77b24a..c979855 100644
35876--- a/arch/x86/pci/pcbios.c
35877+++ b/arch/x86/pci/pcbios.c
35878@@ -79,7 +79,7 @@ union bios32 {
35879 static struct {
35880 unsigned long address;
35881 unsigned short segment;
35882-} bios32_indirect = { 0, __KERNEL_CS };
35883+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
35884
35885 /*
35886 * Returns the entry point for the given service, NULL on error
35887@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
35888 unsigned long length; /* %ecx */
35889 unsigned long entry; /* %edx */
35890 unsigned long flags;
35891+ struct desc_struct d, *gdt;
35892
35893 local_irq_save(flags);
35894- __asm__("lcall *(%%edi); cld"
35895+
35896+ gdt = get_cpu_gdt_table(smp_processor_id());
35897+
35898+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
35899+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35900+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
35901+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35902+
35903+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
35904 : "=a" (return_code),
35905 "=b" (address),
35906 "=c" (length),
35907 "=d" (entry)
35908 : "0" (service),
35909 "1" (0),
35910- "D" (&bios32_indirect));
35911+ "D" (&bios32_indirect),
35912+ "r"(__PCIBIOS_DS)
35913+ : "memory");
35914+
35915+ pax_open_kernel();
35916+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
35917+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
35918+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
35919+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
35920+ pax_close_kernel();
35921+
35922 local_irq_restore(flags);
35923
35924 switch (return_code) {
35925- case 0:
35926- return address + entry;
35927- case 0x80: /* Not present */
35928- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35929- return 0;
35930- default: /* Shouldn't happen */
35931- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35932- service, return_code);
35933+ case 0: {
35934+ int cpu;
35935+ unsigned char flags;
35936+
35937+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
35938+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
35939+ printk(KERN_WARNING "bios32_service: not valid\n");
35940 return 0;
35941+ }
35942+ address = address + PAGE_OFFSET;
35943+ length += 16UL; /* some BIOSs underreport this... */
35944+ flags = 4;
35945+ if (length >= 64*1024*1024) {
35946+ length >>= PAGE_SHIFT;
35947+ flags |= 8;
35948+ }
35949+
35950+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
35951+ gdt = get_cpu_gdt_table(cpu);
35952+ pack_descriptor(&d, address, length, 0x9b, flags);
35953+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35954+ pack_descriptor(&d, address, length, 0x93, flags);
35955+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35956+ }
35957+ return entry;
35958+ }
35959+ case 0x80: /* Not present */
35960+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35961+ return 0;
35962+ default: /* Shouldn't happen */
35963+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35964+ service, return_code);
35965+ return 0;
35966 }
35967 }
35968
35969 static struct {
35970 unsigned long address;
35971 unsigned short segment;
35972-} pci_indirect = { 0, __KERNEL_CS };
35973+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
35974
35975-static int pci_bios_present;
35976+static int pci_bios_present __read_only;
35977
35978 static int check_pcibios(void)
35979 {
35980@@ -131,11 +174,13 @@ static int check_pcibios(void)
35981 unsigned long flags, pcibios_entry;
35982
35983 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
35984- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
35985+ pci_indirect.address = pcibios_entry;
35986
35987 local_irq_save(flags);
35988- __asm__(
35989- "lcall *(%%edi); cld\n\t"
35990+ __asm__("movw %w6, %%ds\n\t"
35991+ "lcall *%%ss:(%%edi); cld\n\t"
35992+ "push %%ss\n\t"
35993+ "pop %%ds\n\t"
35994 "jc 1f\n\t"
35995 "xor %%ah, %%ah\n"
35996 "1:"
35997@@ -144,7 +189,8 @@ static int check_pcibios(void)
35998 "=b" (ebx),
35999 "=c" (ecx)
36000 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
36001- "D" (&pci_indirect)
36002+ "D" (&pci_indirect),
36003+ "r" (__PCIBIOS_DS)
36004 : "memory");
36005 local_irq_restore(flags);
36006
36007@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36008
36009 switch (len) {
36010 case 1:
36011- __asm__("lcall *(%%esi); cld\n\t"
36012+ __asm__("movw %w6, %%ds\n\t"
36013+ "lcall *%%ss:(%%esi); cld\n\t"
36014+ "push %%ss\n\t"
36015+ "pop %%ds\n\t"
36016 "jc 1f\n\t"
36017 "xor %%ah, %%ah\n"
36018 "1:"
36019@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36020 : "1" (PCIBIOS_READ_CONFIG_BYTE),
36021 "b" (bx),
36022 "D" ((long)reg),
36023- "S" (&pci_indirect));
36024+ "S" (&pci_indirect),
36025+ "r" (__PCIBIOS_DS));
36026 /*
36027 * Zero-extend the result beyond 8 bits, do not trust the
36028 * BIOS having done it:
36029@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36030 *value &= 0xff;
36031 break;
36032 case 2:
36033- __asm__("lcall *(%%esi); cld\n\t"
36034+ __asm__("movw %w6, %%ds\n\t"
36035+ "lcall *%%ss:(%%esi); cld\n\t"
36036+ "push %%ss\n\t"
36037+ "pop %%ds\n\t"
36038 "jc 1f\n\t"
36039 "xor %%ah, %%ah\n"
36040 "1:"
36041@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36042 : "1" (PCIBIOS_READ_CONFIG_WORD),
36043 "b" (bx),
36044 "D" ((long)reg),
36045- "S" (&pci_indirect));
36046+ "S" (&pci_indirect),
36047+ "r" (__PCIBIOS_DS));
36048 /*
36049 * Zero-extend the result beyond 16 bits, do not trust the
36050 * BIOS having done it:
36051@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36052 *value &= 0xffff;
36053 break;
36054 case 4:
36055- __asm__("lcall *(%%esi); cld\n\t"
36056+ __asm__("movw %w6, %%ds\n\t"
36057+ "lcall *%%ss:(%%esi); cld\n\t"
36058+ "push %%ss\n\t"
36059+ "pop %%ds\n\t"
36060 "jc 1f\n\t"
36061 "xor %%ah, %%ah\n"
36062 "1:"
36063@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
36064 : "1" (PCIBIOS_READ_CONFIG_DWORD),
36065 "b" (bx),
36066 "D" ((long)reg),
36067- "S" (&pci_indirect));
36068+ "S" (&pci_indirect),
36069+ "r" (__PCIBIOS_DS));
36070 break;
36071 }
36072
36073@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36074
36075 switch (len) {
36076 case 1:
36077- __asm__("lcall *(%%esi); cld\n\t"
36078+ __asm__("movw %w6, %%ds\n\t"
36079+ "lcall *%%ss:(%%esi); cld\n\t"
36080+ "push %%ss\n\t"
36081+ "pop %%ds\n\t"
36082 "jc 1f\n\t"
36083 "xor %%ah, %%ah\n"
36084 "1:"
36085@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36086 "c" (value),
36087 "b" (bx),
36088 "D" ((long)reg),
36089- "S" (&pci_indirect));
36090+ "S" (&pci_indirect),
36091+ "r" (__PCIBIOS_DS));
36092 break;
36093 case 2:
36094- __asm__("lcall *(%%esi); cld\n\t"
36095+ __asm__("movw %w6, %%ds\n\t"
36096+ "lcall *%%ss:(%%esi); cld\n\t"
36097+ "push %%ss\n\t"
36098+ "pop %%ds\n\t"
36099 "jc 1f\n\t"
36100 "xor %%ah, %%ah\n"
36101 "1:"
36102@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36103 "c" (value),
36104 "b" (bx),
36105 "D" ((long)reg),
36106- "S" (&pci_indirect));
36107+ "S" (&pci_indirect),
36108+ "r" (__PCIBIOS_DS));
36109 break;
36110 case 4:
36111- __asm__("lcall *(%%esi); cld\n\t"
36112+ __asm__("movw %w6, %%ds\n\t"
36113+ "lcall *%%ss:(%%esi); cld\n\t"
36114+ "push %%ss\n\t"
36115+ "pop %%ds\n\t"
36116 "jc 1f\n\t"
36117 "xor %%ah, %%ah\n"
36118 "1:"
36119@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
36120 "c" (value),
36121 "b" (bx),
36122 "D" ((long)reg),
36123- "S" (&pci_indirect));
36124+ "S" (&pci_indirect),
36125+ "r" (__PCIBIOS_DS));
36126 break;
36127 }
36128
36129@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36130
36131 DBG("PCI: Fetching IRQ routing table... ");
36132 __asm__("push %%es\n\t"
36133+ "movw %w8, %%ds\n\t"
36134 "push %%ds\n\t"
36135 "pop %%es\n\t"
36136- "lcall *(%%esi); cld\n\t"
36137+ "lcall *%%ss:(%%esi); cld\n\t"
36138 "pop %%es\n\t"
36139+ "push %%ss\n\t"
36140+ "pop %%ds\n"
36141 "jc 1f\n\t"
36142 "xor %%ah, %%ah\n"
36143 "1:"
36144@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
36145 "1" (0),
36146 "D" ((long) &opt),
36147 "S" (&pci_indirect),
36148- "m" (opt)
36149+ "m" (opt),
36150+ "r" (__PCIBIOS_DS)
36151 : "memory");
36152 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
36153 if (ret & 0xff00)
36154@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36155 {
36156 int ret;
36157
36158- __asm__("lcall *(%%esi); cld\n\t"
36159+ __asm__("movw %w5, %%ds\n\t"
36160+ "lcall *%%ss:(%%esi); cld\n\t"
36161+ "push %%ss\n\t"
36162+ "pop %%ds\n"
36163 "jc 1f\n\t"
36164 "xor %%ah, %%ah\n"
36165 "1:"
36166@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
36167 : "0" (PCIBIOS_SET_PCI_HW_INT),
36168 "b" ((dev->bus->number << 8) | dev->devfn),
36169 "c" ((irq << 8) | (pin + 10)),
36170- "S" (&pci_indirect));
36171+ "S" (&pci_indirect),
36172+ "r" (__PCIBIOS_DS));
36173 return !(ret & 0xff00);
36174 }
36175 EXPORT_SYMBOL(pcibios_set_irq_routing);
36176diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
36177index 9ee3491..872192f 100644
36178--- a/arch/x86/platform/efi/efi_32.c
36179+++ b/arch/x86/platform/efi/efi_32.c
36180@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
36181 {
36182 struct desc_ptr gdt_descr;
36183
36184+#ifdef CONFIG_PAX_KERNEXEC
36185+ struct desc_struct d;
36186+#endif
36187+
36188 local_irq_save(efi_rt_eflags);
36189
36190 load_cr3(initial_page_table);
36191 __flush_tlb_all();
36192
36193+#ifdef CONFIG_PAX_KERNEXEC
36194+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
36195+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36196+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
36197+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36198+#endif
36199+
36200 gdt_descr.address = __pa(get_cpu_gdt_table(0));
36201 gdt_descr.size = GDT_SIZE - 1;
36202 load_gdt(&gdt_descr);
36203@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
36204 {
36205 struct desc_ptr gdt_descr;
36206
36207+#ifdef CONFIG_PAX_KERNEXEC
36208+ struct desc_struct d;
36209+
36210+ memset(&d, 0, sizeof d);
36211+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
36212+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
36213+#endif
36214+
36215 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
36216 gdt_descr.size = GDT_SIZE - 1;
36217 load_gdt(&gdt_descr);
36218
36219+#ifdef CONFIG_PAX_PER_CPU_PGD
36220+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36221+#else
36222 load_cr3(swapper_pg_dir);
36223+#endif
36224+
36225 __flush_tlb_all();
36226
36227 local_irq_restore(efi_rt_eflags);
36228diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
36229index 290d397..e09d270 100644
36230--- a/arch/x86/platform/efi/efi_64.c
36231+++ b/arch/x86/platform/efi/efi_64.c
36232@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
36233 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
36234 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
36235 }
36236+
36237+#ifdef CONFIG_PAX_PER_CPU_PGD
36238+ load_cr3(swapper_pg_dir);
36239+#endif
36240+
36241 __flush_tlb_all();
36242 }
36243
36244@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
36245 for (pgd = 0; pgd < n_pgds; pgd++)
36246 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
36247 kfree(save_pgd);
36248+
36249+#ifdef CONFIG_PAX_PER_CPU_PGD
36250+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
36251+#endif
36252+
36253 __flush_tlb_all();
36254 local_irq_restore(efi_flags);
36255 early_code_mapping_set_exec(0);
36256@@ -146,8 +156,23 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
36257 unsigned npages;
36258 pgd_t *pgd;
36259
36260- if (efi_enabled(EFI_OLD_MEMMAP))
36261+ if (efi_enabled(EFI_OLD_MEMMAP)) {
36262+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
36263+ * able to execute the EFI services.
36264+ */
36265+ if (__supported_pte_mask & _PAGE_NX) {
36266+ unsigned long addr = (unsigned long) __va(0);
36267+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
36268+
36269+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
36270+#ifdef CONFIG_PAX_PER_CPU_PGD
36271+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
36272+#endif
36273+ set_pgd(pgd_offset_k(addr), pe);
36274+ }
36275+
36276 return 0;
36277+ }
36278
36279 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
36280 pgd = __va(efi_scratch.efi_pgt);
36281diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
36282index fbe66e6..eae5e38 100644
36283--- a/arch/x86/platform/efi/efi_stub_32.S
36284+++ b/arch/x86/platform/efi/efi_stub_32.S
36285@@ -6,7 +6,9 @@
36286 */
36287
36288 #include <linux/linkage.h>
36289+#include <linux/init.h>
36290 #include <asm/page_types.h>
36291+#include <asm/segment.h>
36292
36293 /*
36294 * efi_call_phys(void *, ...) is a function with variable parameters.
36295@@ -20,7 +22,7 @@
36296 * service functions will comply with gcc calling convention, too.
36297 */
36298
36299-.text
36300+__INIT
36301 ENTRY(efi_call_phys)
36302 /*
36303 * 0. The function can only be called in Linux kernel. So CS has been
36304@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
36305 * The mapping of lower virtual memory has been created in prelog and
36306 * epilog.
36307 */
36308- movl $1f, %edx
36309- subl $__PAGE_OFFSET, %edx
36310- jmp *%edx
36311+#ifdef CONFIG_PAX_KERNEXEC
36312+ movl $(__KERNEXEC_EFI_DS), %edx
36313+ mov %edx, %ds
36314+ mov %edx, %es
36315+ mov %edx, %ss
36316+ addl $2f,(1f)
36317+ ljmp *(1f)
36318+
36319+__INITDATA
36320+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
36321+.previous
36322+
36323+2:
36324+ subl $2b,(1b)
36325+#else
36326+ jmp 1f-__PAGE_OFFSET
36327 1:
36328+#endif
36329
36330 /*
36331 * 2. Now on the top of stack is the return
36332@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
36333 * parameter 2, ..., param n. To make things easy, we save the return
36334 * address of efi_call_phys in a global variable.
36335 */
36336- popl %edx
36337- movl %edx, saved_return_addr
36338- /* get the function pointer into ECX*/
36339- popl %ecx
36340- movl %ecx, efi_rt_function_ptr
36341- movl $2f, %edx
36342- subl $__PAGE_OFFSET, %edx
36343- pushl %edx
36344+ popl (saved_return_addr)
36345+ popl (efi_rt_function_ptr)
36346
36347 /*
36348 * 3. Clear PG bit in %CR0.
36349@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
36350 /*
36351 * 5. Call the physical function.
36352 */
36353- jmp *%ecx
36354+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
36355
36356-2:
36357 /*
36358 * 6. After EFI runtime service returns, control will return to
36359 * following instruction. We'd better readjust stack pointer first.
36360@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
36361 movl %cr0, %edx
36362 orl $0x80000000, %edx
36363 movl %edx, %cr0
36364- jmp 1f
36365-1:
36366+
36367 /*
36368 * 8. Now restore the virtual mode from flat mode by
36369 * adding EIP with PAGE_OFFSET.
36370 */
36371- movl $1f, %edx
36372- jmp *%edx
36373+#ifdef CONFIG_PAX_KERNEXEC
36374+ movl $(__KERNEL_DS), %edx
36375+ mov %edx, %ds
36376+ mov %edx, %es
36377+ mov %edx, %ss
36378+ ljmp $(__KERNEL_CS),$1f
36379+#else
36380+ jmp 1f+__PAGE_OFFSET
36381+#endif
36382 1:
36383
36384 /*
36385 * 9. Balance the stack. And because EAX contain the return value,
36386 * we'd better not clobber it.
36387 */
36388- leal efi_rt_function_ptr, %edx
36389- movl (%edx), %ecx
36390- pushl %ecx
36391+ pushl (efi_rt_function_ptr)
36392
36393 /*
36394- * 10. Push the saved return address onto the stack and return.
36395+ * 10. Return to the saved return address.
36396 */
36397- leal saved_return_addr, %edx
36398- movl (%edx), %ecx
36399- pushl %ecx
36400- ret
36401+ jmpl *(saved_return_addr)
36402 ENDPROC(efi_call_phys)
36403 .previous
36404
36405-.data
36406+__INITDATA
36407 saved_return_addr:
36408 .long 0
36409 efi_rt_function_ptr:
36410diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
36411index 5fcda72..cd4dc41 100644
36412--- a/arch/x86/platform/efi/efi_stub_64.S
36413+++ b/arch/x86/platform/efi/efi_stub_64.S
36414@@ -11,6 +11,7 @@
36415 #include <asm/msr.h>
36416 #include <asm/processor-flags.h>
36417 #include <asm/page_types.h>
36418+#include <asm/alternative-asm.h>
36419
36420 #define SAVE_XMM \
36421 mov %rsp, %rax; \
36422@@ -88,6 +89,7 @@ ENTRY(efi_call)
36423 RESTORE_PGT
36424 addq $48, %rsp
36425 RESTORE_XMM
36426+ pax_force_retaddr 0, 1
36427 ret
36428 ENDPROC(efi_call)
36429
36430@@ -245,8 +247,8 @@ efi_gdt64:
36431 .long 0 /* Filled out by user */
36432 .word 0
36433 .quad 0x0000000000000000 /* NULL descriptor */
36434- .quad 0x00af9a000000ffff /* __KERNEL_CS */
36435- .quad 0x00cf92000000ffff /* __KERNEL_DS */
36436+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
36437+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
36438 .quad 0x0080890000000000 /* TS descriptor */
36439 .quad 0x0000000000000000 /* TS continued */
36440 efi_gdt64_end:
36441diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
36442index 1bbedc4..eb795b5 100644
36443--- a/arch/x86/platform/intel-mid/intel-mid.c
36444+++ b/arch/x86/platform/intel-mid/intel-mid.c
36445@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
36446 {
36447 };
36448
36449-static void intel_mid_reboot(void)
36450+static void __noreturn intel_mid_reboot(void)
36451 {
36452 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
36453+ BUG();
36454 }
36455
36456 static unsigned long __init intel_mid_calibrate_tsc(void)
36457diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36458index 46aa25c..59a68ed 100644
36459--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36460+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
36461@@ -10,10 +10,9 @@
36462 */
36463
36464
36465-/* __attribute__((weak)) makes these declarations overridable */
36466 /* For every CPU addition a new get_<cpuname>_ops interface needs
36467 * to be added.
36468 */
36469-extern void *get_penwell_ops(void) __attribute__((weak));
36470-extern void *get_cloverview_ops(void) __attribute__((weak));
36471-extern void *get_tangier_ops(void) __attribute__((weak));
36472+extern const void *get_penwell_ops(void);
36473+extern const void *get_cloverview_ops(void);
36474+extern const void *get_tangier_ops(void);
36475diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
36476index 23381d2..8ddc10e 100644
36477--- a/arch/x86/platform/intel-mid/mfld.c
36478+++ b/arch/x86/platform/intel-mid/mfld.c
36479@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
36480 pm_power_off = mfld_power_off;
36481 }
36482
36483-void *get_penwell_ops(void)
36484+const void *get_penwell_ops(void)
36485 {
36486 return &penwell_ops;
36487 }
36488
36489-void *get_cloverview_ops(void)
36490+const void *get_cloverview_ops(void)
36491 {
36492 return &penwell_ops;
36493 }
36494diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
36495index aaca917..66eadbc 100644
36496--- a/arch/x86/platform/intel-mid/mrfl.c
36497+++ b/arch/x86/platform/intel-mid/mrfl.c
36498@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
36499 .arch_setup = tangier_arch_setup,
36500 };
36501
36502-void *get_tangier_ops(void)
36503+const void *get_tangier_ops(void)
36504 {
36505 return &tangier_ops;
36506 }
36507diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
36508index d6ee929..3637cb5 100644
36509--- a/arch/x86/platform/olpc/olpc_dt.c
36510+++ b/arch/x86/platform/olpc/olpc_dt.c
36511@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
36512 return res;
36513 }
36514
36515-static struct of_pdt_ops prom_olpc_ops __initdata = {
36516+static struct of_pdt_ops prom_olpc_ops __initconst = {
36517 .nextprop = olpc_dt_nextprop,
36518 .getproplen = olpc_dt_getproplen,
36519 .getproperty = olpc_dt_getproperty,
36520diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
36521index 6ec7910..ecdbb11 100644
36522--- a/arch/x86/power/cpu.c
36523+++ b/arch/x86/power/cpu.c
36524@@ -137,11 +137,8 @@ static void do_fpu_end(void)
36525 static void fix_processor_context(void)
36526 {
36527 int cpu = smp_processor_id();
36528- struct tss_struct *t = &per_cpu(init_tss, cpu);
36529-#ifdef CONFIG_X86_64
36530- struct desc_struct *desc = get_cpu_gdt_table(cpu);
36531- tss_desc tss;
36532-#endif
36533+ struct tss_struct *t = init_tss + cpu;
36534+
36535 set_tss_desc(cpu, t); /*
36536 * This just modifies memory; should not be
36537 * necessary. But... This is necessary, because
36538@@ -150,10 +147,6 @@ static void fix_processor_context(void)
36539 */
36540
36541 #ifdef CONFIG_X86_64
36542- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
36543- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
36544- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
36545-
36546 syscall_init(); /* This sets MSR_*STAR and related */
36547 #endif
36548 load_TR_desc(); /* This does ltr */
36549diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
36550index bad628a..a102610 100644
36551--- a/arch/x86/realmode/init.c
36552+++ b/arch/x86/realmode/init.c
36553@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
36554 __va(real_mode_header->trampoline_header);
36555
36556 #ifdef CONFIG_X86_32
36557- trampoline_header->start = __pa_symbol(startup_32_smp);
36558+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
36559+
36560+#ifdef CONFIG_PAX_KERNEXEC
36561+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
36562+#endif
36563+
36564+ trampoline_header->boot_cs = __BOOT_CS;
36565 trampoline_header->gdt_limit = __BOOT_DS + 7;
36566 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
36567 #else
36568@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
36569 *trampoline_cr4_features = read_cr4();
36570
36571 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
36572- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
36573+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
36574 trampoline_pgd[511] = init_level4_pgt[511].pgd;
36575 #endif
36576 }
36577diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
36578index 7c0d7be..d24dc88 100644
36579--- a/arch/x86/realmode/rm/Makefile
36580+++ b/arch/x86/realmode/rm/Makefile
36581@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
36582
36583 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
36584 -I$(srctree)/arch/x86/boot
36585+ifdef CONSTIFY_PLUGIN
36586+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
36587+endif
36588 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
36589 GCOV_PROFILE := n
36590diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
36591index a28221d..93c40f1 100644
36592--- a/arch/x86/realmode/rm/header.S
36593+++ b/arch/x86/realmode/rm/header.S
36594@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
36595 #endif
36596 /* APM/BIOS reboot */
36597 .long pa_machine_real_restart_asm
36598-#ifdef CONFIG_X86_64
36599+#ifdef CONFIG_X86_32
36600+ .long __KERNEL_CS
36601+#else
36602 .long __KERNEL32_CS
36603 #endif
36604 END(real_mode_header)
36605diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
36606index 48ddd76..c26749f 100644
36607--- a/arch/x86/realmode/rm/trampoline_32.S
36608+++ b/arch/x86/realmode/rm/trampoline_32.S
36609@@ -24,6 +24,12 @@
36610 #include <asm/page_types.h>
36611 #include "realmode.h"
36612
36613+#ifdef CONFIG_PAX_KERNEXEC
36614+#define ta(X) (X)
36615+#else
36616+#define ta(X) (pa_ ## X)
36617+#endif
36618+
36619 .text
36620 .code16
36621
36622@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
36623
36624 cli # We should be safe anyway
36625
36626- movl tr_start, %eax # where we need to go
36627-
36628 movl $0xA5A5A5A5, trampoline_status
36629 # write marker for master knows we're running
36630
36631@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
36632 movw $1, %dx # protected mode (PE) bit
36633 lmsw %dx # into protected mode
36634
36635- ljmpl $__BOOT_CS, $pa_startup_32
36636+ ljmpl *(trampoline_header)
36637
36638 .section ".text32","ax"
36639 .code32
36640@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
36641 .balign 8
36642 GLOBAL(trampoline_header)
36643 tr_start: .space 4
36644- tr_gdt_pad: .space 2
36645+ tr_boot_cs: .space 2
36646 tr_gdt: .space 6
36647 END(trampoline_header)
36648
36649diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
36650index dac7b20..72dbaca 100644
36651--- a/arch/x86/realmode/rm/trampoline_64.S
36652+++ b/arch/x86/realmode/rm/trampoline_64.S
36653@@ -93,6 +93,7 @@ ENTRY(startup_32)
36654 movl %edx, %gs
36655
36656 movl pa_tr_cr4, %eax
36657+ andl $~X86_CR4_PCIDE, %eax
36658 movl %eax, %cr4 # Enable PAE mode
36659
36660 # Setup trampoline 4 level pagetables
36661@@ -106,7 +107,7 @@ ENTRY(startup_32)
36662 wrmsr
36663
36664 # Enable paging and in turn activate Long Mode
36665- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
36666+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
36667 movl %eax, %cr0
36668
36669 /*
36670diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
36671index 9e7e147..25a4158 100644
36672--- a/arch/x86/realmode/rm/wakeup_asm.S
36673+++ b/arch/x86/realmode/rm/wakeup_asm.S
36674@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
36675 lgdtl pmode_gdt
36676
36677 /* This really couldn't... */
36678- movl pmode_entry, %eax
36679 movl pmode_cr0, %ecx
36680 movl %ecx, %cr0
36681- ljmpl $__KERNEL_CS, $pa_startup_32
36682- /* -> jmp *%eax in trampoline_32.S */
36683+
36684+ ljmpl *pmode_entry
36685 #else
36686 jmp trampoline_start
36687 #endif
36688diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
36689index 604a37e..e49702a 100644
36690--- a/arch/x86/tools/Makefile
36691+++ b/arch/x86/tools/Makefile
36692@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
36693
36694 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
36695
36696-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
36697+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
36698 hostprogs-y += relocs
36699 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
36700 PHONY += relocs
36701diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
36702index bbb1d22..e505211 100644
36703--- a/arch/x86/tools/relocs.c
36704+++ b/arch/x86/tools/relocs.c
36705@@ -1,5 +1,7 @@
36706 /* This is included from relocs_32/64.c */
36707
36708+#include "../../../include/generated/autoconf.h"
36709+
36710 #define ElfW(type) _ElfW(ELF_BITS, type)
36711 #define _ElfW(bits, type) __ElfW(bits, type)
36712 #define __ElfW(bits, type) Elf##bits##_##type
36713@@ -11,6 +13,7 @@
36714 #define Elf_Sym ElfW(Sym)
36715
36716 static Elf_Ehdr ehdr;
36717+static Elf_Phdr *phdr;
36718
36719 struct relocs {
36720 uint32_t *offset;
36721@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
36722 }
36723 }
36724
36725+static void read_phdrs(FILE *fp)
36726+{
36727+ unsigned int i;
36728+
36729+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
36730+ if (!phdr) {
36731+ die("Unable to allocate %d program headers\n",
36732+ ehdr.e_phnum);
36733+ }
36734+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
36735+ die("Seek to %d failed: %s\n",
36736+ ehdr.e_phoff, strerror(errno));
36737+ }
36738+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
36739+ die("Cannot read ELF program headers: %s\n",
36740+ strerror(errno));
36741+ }
36742+ for(i = 0; i < ehdr.e_phnum; i++) {
36743+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
36744+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
36745+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
36746+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
36747+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
36748+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
36749+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
36750+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
36751+ }
36752+
36753+}
36754+
36755 static void read_shdrs(FILE *fp)
36756 {
36757- int i;
36758+ unsigned int i;
36759 Elf_Shdr shdr;
36760
36761 secs = calloc(ehdr.e_shnum, sizeof(struct section));
36762@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
36763
36764 static void read_strtabs(FILE *fp)
36765 {
36766- int i;
36767+ unsigned int i;
36768 for (i = 0; i < ehdr.e_shnum; i++) {
36769 struct section *sec = &secs[i];
36770 if (sec->shdr.sh_type != SHT_STRTAB) {
36771@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
36772
36773 static void read_symtabs(FILE *fp)
36774 {
36775- int i,j;
36776+ unsigned int i,j;
36777 for (i = 0; i < ehdr.e_shnum; i++) {
36778 struct section *sec = &secs[i];
36779 if (sec->shdr.sh_type != SHT_SYMTAB) {
36780@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
36781 }
36782
36783
36784-static void read_relocs(FILE *fp)
36785+static void read_relocs(FILE *fp, int use_real_mode)
36786 {
36787- int i,j;
36788+ unsigned int i,j;
36789+ uint32_t base;
36790+
36791 for (i = 0; i < ehdr.e_shnum; i++) {
36792 struct section *sec = &secs[i];
36793 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36794@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
36795 die("Cannot read symbol table: %s\n",
36796 strerror(errno));
36797 }
36798+ base = 0;
36799+
36800+#ifdef CONFIG_X86_32
36801+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
36802+ if (phdr[j].p_type != PT_LOAD )
36803+ continue;
36804+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
36805+ continue;
36806+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
36807+ break;
36808+ }
36809+#endif
36810+
36811 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
36812 Elf_Rel *rel = &sec->reltab[j];
36813- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
36814+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
36815 rel->r_info = elf_xword_to_cpu(rel->r_info);
36816 #if (SHT_REL_TYPE == SHT_RELA)
36817 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
36818@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
36819
36820 static void print_absolute_symbols(void)
36821 {
36822- int i;
36823+ unsigned int i;
36824 const char *format;
36825
36826 if (ELF_BITS == 64)
36827@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
36828 for (i = 0; i < ehdr.e_shnum; i++) {
36829 struct section *sec = &secs[i];
36830 char *sym_strtab;
36831- int j;
36832+ unsigned int j;
36833
36834 if (sec->shdr.sh_type != SHT_SYMTAB) {
36835 continue;
36836@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
36837
36838 static void print_absolute_relocs(void)
36839 {
36840- int i, printed = 0;
36841+ unsigned int i, printed = 0;
36842 const char *format;
36843
36844 if (ELF_BITS == 64)
36845@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
36846 struct section *sec_applies, *sec_symtab;
36847 char *sym_strtab;
36848 Elf_Sym *sh_symtab;
36849- int j;
36850+ unsigned int j;
36851 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36852 continue;
36853 }
36854@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
36855 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
36856 Elf_Sym *sym, const char *symname))
36857 {
36858- int i;
36859+ unsigned int i;
36860 /* Walk through the relocations */
36861 for (i = 0; i < ehdr.e_shnum; i++) {
36862 char *sym_strtab;
36863 Elf_Sym *sh_symtab;
36864 struct section *sec_applies, *sec_symtab;
36865- int j;
36866+ unsigned int j;
36867 struct section *sec = &secs[i];
36868
36869 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36870@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36871 {
36872 unsigned r_type = ELF32_R_TYPE(rel->r_info);
36873 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
36874+ char *sym_strtab = sec->link->link->strtab;
36875+
36876+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
36877+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
36878+ return 0;
36879+
36880+#ifdef CONFIG_PAX_KERNEXEC
36881+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
36882+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
36883+ return 0;
36884+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
36885+ return 0;
36886+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
36887+ return 0;
36888+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
36889+ return 0;
36890+#endif
36891
36892 switch (r_type) {
36893 case R_386_NONE:
36894@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
36895
36896 static void emit_relocs(int as_text, int use_real_mode)
36897 {
36898- int i;
36899+ unsigned int i;
36900 int (*write_reloc)(uint32_t, FILE *) = write32;
36901 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36902 const char *symname);
36903@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
36904 {
36905 regex_init(use_real_mode);
36906 read_ehdr(fp);
36907+ read_phdrs(fp);
36908 read_shdrs(fp);
36909 read_strtabs(fp);
36910 read_symtabs(fp);
36911- read_relocs(fp);
36912+ read_relocs(fp, use_real_mode);
36913 if (ELF_BITS == 64)
36914 percpu_init();
36915 if (show_absolute_syms) {
36916diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
36917index f40281e..92728c9 100644
36918--- a/arch/x86/um/mem_32.c
36919+++ b/arch/x86/um/mem_32.c
36920@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
36921 gate_vma.vm_start = FIXADDR_USER_START;
36922 gate_vma.vm_end = FIXADDR_USER_END;
36923 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
36924- gate_vma.vm_page_prot = __P101;
36925+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
36926
36927 return 0;
36928 }
36929diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
36930index 80ffa5b..a33bd15 100644
36931--- a/arch/x86/um/tls_32.c
36932+++ b/arch/x86/um/tls_32.c
36933@@ -260,7 +260,7 @@ out:
36934 if (unlikely(task == current &&
36935 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
36936 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
36937- "without flushed TLS.", current->pid);
36938+ "without flushed TLS.", task_pid_nr(current));
36939 }
36940
36941 return 0;
36942diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
36943index 5a4affe..9e2d522 100644
36944--- a/arch/x86/vdso/Makefile
36945+++ b/arch/x86/vdso/Makefile
36946@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
36947 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
36948 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
36949
36950-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36951+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36952 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
36953 GCOV_PROFILE := n
36954
36955diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
36956index e904c27..b9eaa03 100644
36957--- a/arch/x86/vdso/vdso32-setup.c
36958+++ b/arch/x86/vdso/vdso32-setup.c
36959@@ -14,6 +14,7 @@
36960 #include <asm/cpufeature.h>
36961 #include <asm/processor.h>
36962 #include <asm/vdso.h>
36963+#include <asm/mman.h>
36964
36965 #ifdef CONFIG_COMPAT_VDSO
36966 #define VDSO_DEFAULT 0
36967diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
36968index 970463b..da82d3e 100644
36969--- a/arch/x86/vdso/vma.c
36970+++ b/arch/x86/vdso/vma.c
36971@@ -16,10 +16,9 @@
36972 #include <asm/vdso.h>
36973 #include <asm/page.h>
36974 #include <asm/hpet.h>
36975+#include <asm/mman.h>
36976
36977 #if defined(CONFIG_X86_64)
36978-unsigned int __read_mostly vdso64_enabled = 1;
36979-
36980 extern unsigned short vdso_sync_cpuid;
36981 #endif
36982
36983@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36984 .pages = no_pages,
36985 };
36986
36987+#ifdef CONFIG_PAX_RANDMMAP
36988+ if (mm->pax_flags & MF_PAX_RANDMMAP)
36989+ calculate_addr = false;
36990+#endif
36991+
36992 if (calculate_addr) {
36993 addr = vdso_addr(current->mm->start_stack,
36994 image->size - image->sym_vvar_start);
36995@@ -111,14 +115,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36996 down_write(&mm->mmap_sem);
36997
36998 addr = get_unmapped_area(NULL, addr,
36999- image->size - image->sym_vvar_start, 0, 0);
37000+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
37001 if (IS_ERR_VALUE(addr)) {
37002 ret = addr;
37003 goto up_fail;
37004 }
37005
37006 text_start = addr - image->sym_vvar_start;
37007- current->mm->context.vdso = (void __user *)text_start;
37008+ mm->context.vdso = text_start;
37009
37010 /*
37011 * MAYWRITE to allow gdb to COW and set breakpoints
37012@@ -163,15 +167,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
37013 hpet_address >> PAGE_SHIFT,
37014 PAGE_SIZE,
37015 pgprot_noncached(PAGE_READONLY));
37016-
37017- if (ret)
37018- goto up_fail;
37019 }
37020 #endif
37021
37022 up_fail:
37023 if (ret)
37024- current->mm->context.vdso = NULL;
37025+ current->mm->context.vdso = 0;
37026
37027 up_write(&mm->mmap_sem);
37028 return ret;
37029@@ -191,8 +192,8 @@ static int load_vdso32(void)
37030
37031 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
37032 current_thread_info()->sysenter_return =
37033- current->mm->context.vdso +
37034- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
37035+ (void __force_user *)(current->mm->context.vdso +
37036+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
37037
37038 return 0;
37039 }
37040@@ -201,9 +202,6 @@ static int load_vdso32(void)
37041 #ifdef CONFIG_X86_64
37042 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37043 {
37044- if (!vdso64_enabled)
37045- return 0;
37046-
37047 return map_vdso(&vdso_image_64, true);
37048 }
37049
37050@@ -212,12 +210,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
37051 int uses_interp)
37052 {
37053 #ifdef CONFIG_X86_X32_ABI
37054- if (test_thread_flag(TIF_X32)) {
37055- if (!vdso64_enabled)
37056- return 0;
37057-
37058+ if (test_thread_flag(TIF_X32))
37059 return map_vdso(&vdso_image_x32, true);
37060- }
37061 #endif
37062
37063 return load_vdso32();
37064@@ -229,12 +223,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
37065 return load_vdso32();
37066 }
37067 #endif
37068-
37069-#ifdef CONFIG_X86_64
37070-static __init int vdso_setup(char *s)
37071-{
37072- vdso64_enabled = simple_strtoul(s, NULL, 0);
37073- return 0;
37074-}
37075-__setup("vdso=", vdso_setup);
37076-#endif
37077diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
37078index e88fda8..76ce7ce 100644
37079--- a/arch/x86/xen/Kconfig
37080+++ b/arch/x86/xen/Kconfig
37081@@ -9,6 +9,7 @@ config XEN
37082 select XEN_HAVE_PVMMU
37083 depends on X86_64 || (X86_32 && X86_PAE)
37084 depends on X86_TSC
37085+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
37086 help
37087 This is the Linux Xen port. Enabling this will allow the
37088 kernel to boot in a paravirtualized environment under the
37089diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
37090index c0cb11f..bed56ff 100644
37091--- a/arch/x86/xen/enlighten.c
37092+++ b/arch/x86/xen/enlighten.c
37093@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
37094
37095 struct shared_info xen_dummy_shared_info;
37096
37097-void *xen_initial_gdt;
37098-
37099 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
37100 __read_mostly int xen_have_vector_callback;
37101 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
37102@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
37103 {
37104 unsigned long va = dtr->address;
37105 unsigned int size = dtr->size + 1;
37106- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37107- unsigned long frames[pages];
37108+ unsigned long frames[65536 / PAGE_SIZE];
37109 int f;
37110
37111 /*
37112@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37113 {
37114 unsigned long va = dtr->address;
37115 unsigned int size = dtr->size + 1;
37116- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
37117- unsigned long frames[pages];
37118+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
37119 int f;
37120
37121 /*
37122@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
37123 * 8-byte entries, or 16 4k pages..
37124 */
37125
37126- BUG_ON(size > 65536);
37127+ BUG_ON(size > GDT_SIZE);
37128 BUG_ON(va & ~PAGE_MASK);
37129
37130 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
37131@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
37132 return 0;
37133 }
37134
37135-static void set_xen_basic_apic_ops(void)
37136+static void __init set_xen_basic_apic_ops(void)
37137 {
37138 apic->read = xen_apic_read;
37139 apic->write = xen_apic_write;
37140@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
37141 #endif
37142 };
37143
37144-static void xen_reboot(int reason)
37145+static __noreturn void xen_reboot(int reason)
37146 {
37147 struct sched_shutdown r = { .reason = reason };
37148
37149- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
37150- BUG();
37151+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
37152+ BUG();
37153 }
37154
37155-static void xen_restart(char *msg)
37156+static __noreturn void xen_restart(char *msg)
37157 {
37158 xen_reboot(SHUTDOWN_reboot);
37159 }
37160
37161-static void xen_emergency_restart(void)
37162+static __noreturn void xen_emergency_restart(void)
37163 {
37164 xen_reboot(SHUTDOWN_reboot);
37165 }
37166
37167-static void xen_machine_halt(void)
37168+static __noreturn void xen_machine_halt(void)
37169 {
37170 xen_reboot(SHUTDOWN_poweroff);
37171 }
37172
37173-static void xen_machine_power_off(void)
37174+static __noreturn void xen_machine_power_off(void)
37175 {
37176 if (pm_power_off)
37177 pm_power_off();
37178@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
37179 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
37180
37181 /* Work out if we support NX */
37182- x86_configure_nx();
37183+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
37184+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
37185+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
37186+ unsigned l, h;
37187+
37188+ __supported_pte_mask |= _PAGE_NX;
37189+ rdmsr(MSR_EFER, l, h);
37190+ l |= EFER_NX;
37191+ wrmsr(MSR_EFER, l, h);
37192+ }
37193+#endif
37194
37195 /* Get mfn list */
37196 xen_build_dynamic_phys_to_machine();
37197@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
37198
37199 machine_ops = xen_machine_ops;
37200
37201- /*
37202- * The only reliable way to retain the initial address of the
37203- * percpu gdt_page is to remember it here, so we can go and
37204- * mark it RW later, when the initial percpu area is freed.
37205- */
37206- xen_initial_gdt = &per_cpu(gdt_page, 0);
37207-
37208 xen_smp_init();
37209
37210 #ifdef CONFIG_ACPI_NUMA
37211diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
37212index 16fb009..02b7801 100644
37213--- a/arch/x86/xen/mmu.c
37214+++ b/arch/x86/xen/mmu.c
37215@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
37216 return val;
37217 }
37218
37219-static pteval_t pte_pfn_to_mfn(pteval_t val)
37220+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
37221 {
37222 if (val & _PAGE_PRESENT) {
37223 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
37224@@ -1904,7 +1904,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37225 * L3_k[511] -> level2_fixmap_pgt */
37226 convert_pfn_mfn(level3_kernel_pgt);
37227
37228+ convert_pfn_mfn(level3_vmalloc_start_pgt);
37229+ convert_pfn_mfn(level3_vmalloc_end_pgt);
37230+ convert_pfn_mfn(level3_vmemmap_pgt);
37231 /* L3_k[511][506] -> level1_fixmap_pgt */
37232+ /* L3_k[511][507] -> level1_vsyscall_pgt */
37233 convert_pfn_mfn(level2_fixmap_pgt);
37234 }
37235 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
37236@@ -1929,11 +1933,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
37237 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
37238 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
37239 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
37240+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
37241+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
37242+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
37243 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
37244 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
37245+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
37246 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
37247 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
37248 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
37249+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
37250
37251 /* Pin down new L4 */
37252 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
37253@@ -2117,6 +2126,7 @@ static void __init xen_post_allocator_init(void)
37254 pv_mmu_ops.set_pud = xen_set_pud;
37255 #if PAGETABLE_LEVELS == 4
37256 pv_mmu_ops.set_pgd = xen_set_pgd;
37257+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
37258 #endif
37259
37260 /* This will work as long as patching hasn't happened yet
37261@@ -2195,6 +2205,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
37262 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
37263 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
37264 .set_pgd = xen_set_pgd_hyper,
37265+ .set_pgd_batched = xen_set_pgd_hyper,
37266
37267 .alloc_pud = xen_alloc_pmd_init,
37268 .release_pud = xen_release_pmd_init,
37269diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
37270index 7005974..54fb05f 100644
37271--- a/arch/x86/xen/smp.c
37272+++ b/arch/x86/xen/smp.c
37273@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
37274
37275 if (xen_pv_domain()) {
37276 if (!xen_feature(XENFEAT_writable_page_tables))
37277- /* We've switched to the "real" per-cpu gdt, so make
37278- * sure the old memory can be recycled. */
37279- make_lowmem_page_readwrite(xen_initial_gdt);
37280-
37281 #ifdef CONFIG_X86_32
37282 /*
37283 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
37284 * expects __USER_DS
37285 */
37286- loadsegment(ds, __USER_DS);
37287- loadsegment(es, __USER_DS);
37288+ loadsegment(ds, __KERNEL_DS);
37289+ loadsegment(es, __KERNEL_DS);
37290 #endif
37291
37292 xen_filter_cpu_maps();
37293@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37294 #ifdef CONFIG_X86_32
37295 /* Note: PVH is not yet supported on x86_32. */
37296 ctxt->user_regs.fs = __KERNEL_PERCPU;
37297- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
37298+ savesegment(gs, ctxt->user_regs.gs);
37299 #endif
37300 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
37301
37302@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
37303 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
37304 ctxt->flags = VGCF_IN_KERNEL;
37305 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
37306- ctxt->user_regs.ds = __USER_DS;
37307- ctxt->user_regs.es = __USER_DS;
37308+ ctxt->user_regs.ds = __KERNEL_DS;
37309+ ctxt->user_regs.es = __KERNEL_DS;
37310 ctxt->user_regs.ss = __KERNEL_DS;
37311
37312 xen_copy_trap_info(ctxt->trap_ctxt);
37313@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
37314 int rc;
37315
37316 per_cpu(current_task, cpu) = idle;
37317+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
37318 #ifdef CONFIG_X86_32
37319 irq_ctx_init(cpu);
37320 #else
37321 clear_tsk_thread_flag(idle, TIF_FORK);
37322 #endif
37323- per_cpu(kernel_stack, cpu) =
37324- (unsigned long)task_stack_page(idle) -
37325- KERNEL_STACK_OFFSET + THREAD_SIZE;
37326+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
37327
37328 xen_setup_runstate_info(cpu);
37329 xen_setup_timer(cpu);
37330@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
37331
37332 void __init xen_smp_init(void)
37333 {
37334- smp_ops = xen_smp_ops;
37335+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
37336 xen_fill_possible_map();
37337 }
37338
37339diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
37340index fd92a64..1f72641 100644
37341--- a/arch/x86/xen/xen-asm_32.S
37342+++ b/arch/x86/xen/xen-asm_32.S
37343@@ -99,7 +99,7 @@ ENTRY(xen_iret)
37344 pushw %fs
37345 movl $(__KERNEL_PERCPU), %eax
37346 movl %eax, %fs
37347- movl %fs:xen_vcpu, %eax
37348+ mov PER_CPU_VAR(xen_vcpu), %eax
37349 POP_FS
37350 #else
37351 movl %ss:xen_vcpu, %eax
37352diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
37353index 485b695..fda3e7c 100644
37354--- a/arch/x86/xen/xen-head.S
37355+++ b/arch/x86/xen/xen-head.S
37356@@ -39,6 +39,17 @@ ENTRY(startup_xen)
37357 #ifdef CONFIG_X86_32
37358 mov %esi,xen_start_info
37359 mov $init_thread_union+THREAD_SIZE,%esp
37360+#ifdef CONFIG_SMP
37361+ movl $cpu_gdt_table,%edi
37362+ movl $__per_cpu_load,%eax
37363+ movw %ax,__KERNEL_PERCPU + 2(%edi)
37364+ rorl $16,%eax
37365+ movb %al,__KERNEL_PERCPU + 4(%edi)
37366+ movb %ah,__KERNEL_PERCPU + 7(%edi)
37367+ movl $__per_cpu_end - 1,%eax
37368+ subl $__per_cpu_start,%eax
37369+ movw %ax,__KERNEL_PERCPU + 0(%edi)
37370+#endif
37371 #else
37372 mov %rsi,xen_start_info
37373 mov $init_thread_union+THREAD_SIZE,%rsp
37374diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
37375index 28c7e0b..2acfec7 100644
37376--- a/arch/x86/xen/xen-ops.h
37377+++ b/arch/x86/xen/xen-ops.h
37378@@ -10,8 +10,6 @@
37379 extern const char xen_hypervisor_callback[];
37380 extern const char xen_failsafe_callback[];
37381
37382-extern void *xen_initial_gdt;
37383-
37384 struct trap_info;
37385 void xen_copy_trap_info(struct trap_info *traps);
37386
37387diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
37388index 525bd3d..ef888b1 100644
37389--- a/arch/xtensa/variants/dc232b/include/variant/core.h
37390+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
37391@@ -119,9 +119,9 @@
37392 ----------------------------------------------------------------------*/
37393
37394 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
37395-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
37396 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
37397 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
37398+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37399
37400 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
37401 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
37402diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
37403index 2f33760..835e50a 100644
37404--- a/arch/xtensa/variants/fsf/include/variant/core.h
37405+++ b/arch/xtensa/variants/fsf/include/variant/core.h
37406@@ -11,6 +11,7 @@
37407 #ifndef _XTENSA_CORE_H
37408 #define _XTENSA_CORE_H
37409
37410+#include <linux/const.h>
37411
37412 /****************************************************************************
37413 Parameters Useful for Any Code, USER or PRIVILEGED
37414@@ -112,9 +113,9 @@
37415 ----------------------------------------------------------------------*/
37416
37417 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37418-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37419 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37420 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37421+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37422
37423 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
37424 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
37425diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
37426index af00795..2bb8105 100644
37427--- a/arch/xtensa/variants/s6000/include/variant/core.h
37428+++ b/arch/xtensa/variants/s6000/include/variant/core.h
37429@@ -11,6 +11,7 @@
37430 #ifndef _XTENSA_CORE_CONFIGURATION_H
37431 #define _XTENSA_CORE_CONFIGURATION_H
37432
37433+#include <linux/const.h>
37434
37435 /****************************************************************************
37436 Parameters Useful for Any Code, USER or PRIVILEGED
37437@@ -118,9 +119,9 @@
37438 ----------------------------------------------------------------------*/
37439
37440 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
37441-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
37442 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
37443 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
37444+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
37445
37446 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
37447 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
37448diff --git a/block/bio.c b/block/bio.c
37449index 3e6331d..f970433 100644
37450--- a/block/bio.c
37451+++ b/block/bio.c
37452@@ -1160,7 +1160,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
37453 /*
37454 * Overflow, abort
37455 */
37456- if (end < start)
37457+ if (end < start || end - start > INT_MAX - nr_pages)
37458 return ERR_PTR(-EINVAL);
37459
37460 nr_pages += end - start;
37461@@ -1294,7 +1294,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
37462 /*
37463 * Overflow, abort
37464 */
37465- if (end < start)
37466+ if (end < start || end - start > INT_MAX - nr_pages)
37467 return ERR_PTR(-EINVAL);
37468
37469 nr_pages += end - start;
37470@@ -1556,7 +1556,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
37471 const int read = bio_data_dir(bio) == READ;
37472 struct bio_map_data *bmd = bio->bi_private;
37473 int i;
37474- char *p = bmd->sgvecs[0].iov_base;
37475+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
37476
37477 bio_for_each_segment_all(bvec, bio, i) {
37478 char *addr = page_address(bvec->bv_page);
37479diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
37480index e17da94..e01cce1 100644
37481--- a/block/blk-cgroup.c
37482+++ b/block/blk-cgroup.c
37483@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
37484 static struct cgroup_subsys_state *
37485 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37486 {
37487- static atomic64_t id_seq = ATOMIC64_INIT(0);
37488+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
37489 struct blkcg *blkcg;
37490
37491 if (!parent_css) {
37492@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
37493
37494 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
37495 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
37496- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
37497+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
37498 done:
37499 spin_lock_init(&blkcg->lock);
37500 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
37501diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
37502index 0736729..2ec3b48 100644
37503--- a/block/blk-iopoll.c
37504+++ b/block/blk-iopoll.c
37505@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
37506 }
37507 EXPORT_SYMBOL(blk_iopoll_complete);
37508
37509-static void blk_iopoll_softirq(struct softirq_action *h)
37510+static __latent_entropy void blk_iopoll_softirq(void)
37511 {
37512 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
37513 int rearm = 0, budget = blk_iopoll_budget;
37514diff --git a/block/blk-map.c b/block/blk-map.c
37515index f890d43..97b0482 100644
37516--- a/block/blk-map.c
37517+++ b/block/blk-map.c
37518@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
37519 if (!len || !kbuf)
37520 return -EINVAL;
37521
37522- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
37523+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
37524 if (do_copy)
37525 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
37526 else
37527diff --git a/block/blk-softirq.c b/block/blk-softirq.c
37528index 53b1737..08177d2e 100644
37529--- a/block/blk-softirq.c
37530+++ b/block/blk-softirq.c
37531@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
37532 * Softirq action handler - move entries to local list and loop over them
37533 * while passing them to the queue registered handler.
37534 */
37535-static void blk_done_softirq(struct softirq_action *h)
37536+static __latent_entropy void blk_done_softirq(void)
37537 {
37538 struct list_head *cpu_list, local_list;
37539
37540diff --git a/block/bsg.c b/block/bsg.c
37541index ff46add..c4ba8ee 100644
37542--- a/block/bsg.c
37543+++ b/block/bsg.c
37544@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
37545 struct sg_io_v4 *hdr, struct bsg_device *bd,
37546 fmode_t has_write_perm)
37547 {
37548+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37549+ unsigned char *cmdptr;
37550+
37551 if (hdr->request_len > BLK_MAX_CDB) {
37552 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
37553 if (!rq->cmd)
37554 return -ENOMEM;
37555- }
37556+ cmdptr = rq->cmd;
37557+ } else
37558+ cmdptr = tmpcmd;
37559
37560- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
37561+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
37562 hdr->request_len))
37563 return -EFAULT;
37564
37565+ if (cmdptr != rq->cmd)
37566+ memcpy(rq->cmd, cmdptr, hdr->request_len);
37567+
37568 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
37569 if (blk_verify_command(rq->cmd, has_write_perm))
37570 return -EPERM;
37571diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
37572index 18b282c..050dbe5 100644
37573--- a/block/compat_ioctl.c
37574+++ b/block/compat_ioctl.c
37575@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
37576 cgc = compat_alloc_user_space(sizeof(*cgc));
37577 cgc32 = compat_ptr(arg);
37578
37579- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
37580+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
37581 get_user(data, &cgc32->buffer) ||
37582 put_user(compat_ptr(data), &cgc->buffer) ||
37583 copy_in_user(&cgc->buflen, &cgc32->buflen,
37584@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
37585 err |= __get_user(f->spec1, &uf->spec1);
37586 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
37587 err |= __get_user(name, &uf->name);
37588- f->name = compat_ptr(name);
37589+ f->name = (void __force_kernel *)compat_ptr(name);
37590 if (err) {
37591 err = -EFAULT;
37592 goto out;
37593diff --git a/block/genhd.c b/block/genhd.c
37594index e6723bd..703e4ac 100644
37595--- a/block/genhd.c
37596+++ b/block/genhd.c
37597@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
37598
37599 /*
37600 * Register device numbers dev..(dev+range-1)
37601- * range must be nonzero
37602+ * Noop if @range is zero.
37603 * The hash chain is sorted on range, so that subranges can override.
37604 */
37605 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
37606 struct kobject *(*probe)(dev_t, int *, void *),
37607 int (*lock)(dev_t, void *), void *data)
37608 {
37609- kobj_map(bdev_map, devt, range, module, probe, lock, data);
37610+ if (range)
37611+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
37612 }
37613
37614 EXPORT_SYMBOL(blk_register_region);
37615
37616+/* undo blk_register_region(), noop if @range is zero */
37617 void blk_unregister_region(dev_t devt, unsigned long range)
37618 {
37619- kobj_unmap(bdev_map, devt, range);
37620+ if (range)
37621+ kobj_unmap(bdev_map, devt, range);
37622 }
37623
37624 EXPORT_SYMBOL(blk_unregister_region);
37625diff --git a/block/partitions/efi.c b/block/partitions/efi.c
37626index 56d08fd..2e07090 100644
37627--- a/block/partitions/efi.c
37628+++ b/block/partitions/efi.c
37629@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
37630 if (!gpt)
37631 return NULL;
37632
37633+ if (!le32_to_cpu(gpt->num_partition_entries))
37634+ return NULL;
37635+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
37636+ if (!pte)
37637+ return NULL;
37638+
37639 count = le32_to_cpu(gpt->num_partition_entries) *
37640 le32_to_cpu(gpt->sizeof_partition_entry);
37641- if (!count)
37642- return NULL;
37643- pte = kmalloc(count, GFP_KERNEL);
37644- if (!pte)
37645- return NULL;
37646-
37647 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
37648 (u8 *) pte, count) < count) {
37649 kfree(pte);
37650diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
37651index 9b8eaec..c20279a 100644
37652--- a/block/scsi_ioctl.c
37653+++ b/block/scsi_ioctl.c
37654@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
37655 return put_user(0, p);
37656 }
37657
37658-static int sg_get_timeout(struct request_queue *q)
37659+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
37660 {
37661 return jiffies_to_clock_t(q->sg_timeout);
37662 }
37663@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
37664 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
37665 struct sg_io_hdr *hdr, fmode_t mode)
37666 {
37667- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
37668+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37669+ unsigned char *cmdptr;
37670+
37671+ if (rq->cmd != rq->__cmd)
37672+ cmdptr = rq->cmd;
37673+ else
37674+ cmdptr = tmpcmd;
37675+
37676+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
37677 return -EFAULT;
37678+
37679+ if (cmdptr != rq->cmd)
37680+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
37681+
37682 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
37683 return -EPERM;
37684
37685@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37686 int err;
37687 unsigned int in_len, out_len, bytes, opcode, cmdlen;
37688 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
37689+ unsigned char tmpcmd[sizeof(rq->__cmd)];
37690+ unsigned char *cmdptr;
37691
37692 if (!sic)
37693 return -EINVAL;
37694@@ -470,9 +484,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
37695 */
37696 err = -EFAULT;
37697 rq->cmd_len = cmdlen;
37698- if (copy_from_user(rq->cmd, sic->data, cmdlen))
37699+
37700+ if (rq->cmd != rq->__cmd)
37701+ cmdptr = rq->cmd;
37702+ else
37703+ cmdptr = tmpcmd;
37704+
37705+ if (copy_from_user(cmdptr, sic->data, cmdlen))
37706 goto error;
37707
37708+ if (rq->cmd != cmdptr)
37709+ memcpy(rq->cmd, cmdptr, cmdlen);
37710+
37711 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
37712 goto error;
37713
37714diff --git a/crypto/cryptd.c b/crypto/cryptd.c
37715index e592c90..c566114 100644
37716--- a/crypto/cryptd.c
37717+++ b/crypto/cryptd.c
37718@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
37719
37720 struct cryptd_blkcipher_request_ctx {
37721 crypto_completion_t complete;
37722-};
37723+} __no_const;
37724
37725 struct cryptd_hash_ctx {
37726 struct crypto_shash *child;
37727@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
37728
37729 struct cryptd_aead_request_ctx {
37730 crypto_completion_t complete;
37731-};
37732+} __no_const;
37733
37734 static void cryptd_queue_worker(struct work_struct *work);
37735
37736diff --git a/crypto/cts.c b/crypto/cts.c
37737index 042223f..133f087 100644
37738--- a/crypto/cts.c
37739+++ b/crypto/cts.c
37740@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
37741 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
37742 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
37743 /* 6. Decrypt En to create Pn-1 */
37744- memset(iv, 0, sizeof(iv));
37745+ memzero_explicit(iv, sizeof(iv));
37746+
37747 sg_set_buf(&sgsrc[0], s + bsize, bsize);
37748 sg_set_buf(&sgdst[0], d, bsize);
37749 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
37750diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
37751index 309d345..1632720 100644
37752--- a/crypto/pcrypt.c
37753+++ b/crypto/pcrypt.c
37754@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
37755 int ret;
37756
37757 pinst->kobj.kset = pcrypt_kset;
37758- ret = kobject_add(&pinst->kobj, NULL, name);
37759+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
37760 if (!ret)
37761 kobject_uevent(&pinst->kobj, KOBJ_ADD);
37762
37763diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
37764index 4279480..7bb0474 100644
37765--- a/crypto/sha1_generic.c
37766+++ b/crypto/sha1_generic.c
37767@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
37768 src = data + done;
37769 } while (done + SHA1_BLOCK_SIZE <= len);
37770
37771- memset(temp, 0, sizeof(temp));
37772+ memzero_explicit(temp, sizeof(temp));
37773 partial = 0;
37774 }
37775 memcpy(sctx->buffer + partial, src, len - done);
37776diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
37777index 5433667..32c5e5e 100644
37778--- a/crypto/sha256_generic.c
37779+++ b/crypto/sha256_generic.c
37780@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
37781
37782 /* clear any sensitive info... */
37783 a = b = c = d = e = f = g = h = t1 = t2 = 0;
37784- memset(W, 0, 64 * sizeof(u32));
37785+ memzero_explicit(W, 64 * sizeof(u32));
37786 }
37787
37788-
37789 static int sha224_init(struct shash_desc *desc)
37790 {
37791 struct sha256_state *sctx = shash_desc_ctx(desc);
37792@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
37793 sha256_final(desc, D);
37794
37795 memcpy(hash, D, SHA224_DIGEST_SIZE);
37796- memset(D, 0, SHA256_DIGEST_SIZE);
37797+ memzero_explicit(D, SHA256_DIGEST_SIZE);
37798
37799 return 0;
37800 }
37801diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
37802index 6ed124f..04d295a 100644
37803--- a/crypto/sha512_generic.c
37804+++ b/crypto/sha512_generic.c
37805@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
37806 sha512_final(desc, D);
37807
37808 memcpy(hash, D, 48);
37809- memset(D, 0, 64);
37810+ memzero_explicit(D, 64);
37811
37812 return 0;
37813 }
37814diff --git a/crypto/tgr192.c b/crypto/tgr192.c
37815index 8740355..3c7af0d 100644
37816--- a/crypto/tgr192.c
37817+++ b/crypto/tgr192.c
37818@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
37819
37820 tgr192_final(desc, D);
37821 memcpy(out, D, TGR160_DIGEST_SIZE);
37822- memset(D, 0, TGR192_DIGEST_SIZE);
37823+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37824
37825 return 0;
37826 }
37827@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
37828
37829 tgr192_final(desc, D);
37830 memcpy(out, D, TGR128_DIGEST_SIZE);
37831- memset(D, 0, TGR192_DIGEST_SIZE);
37832+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37833
37834 return 0;
37835 }
37836diff --git a/crypto/vmac.c b/crypto/vmac.c
37837index 2eb11a3..d84c24b 100644
37838--- a/crypto/vmac.c
37839+++ b/crypto/vmac.c
37840@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
37841 }
37842 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
37843 memcpy(out, &mac, sizeof(vmac_t));
37844- memset(&mac, 0, sizeof(vmac_t));
37845+ memzero_explicit(&mac, sizeof(vmac_t));
37846 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
37847 ctx->partial_size = 0;
37848 return 0;
37849diff --git a/crypto/wp512.c b/crypto/wp512.c
37850index 180f1d6..ec64e77 100644
37851--- a/crypto/wp512.c
37852+++ b/crypto/wp512.c
37853@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
37854 u8 D[64];
37855
37856 wp512_final(desc, D);
37857- memcpy (out, D, WP384_DIGEST_SIZE);
37858- memset (D, 0, WP512_DIGEST_SIZE);
37859+ memcpy(out, D, WP384_DIGEST_SIZE);
37860+ memzero_explicit(D, WP512_DIGEST_SIZE);
37861
37862 return 0;
37863 }
37864@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
37865 u8 D[64];
37866
37867 wp512_final(desc, D);
37868- memcpy (out, D, WP256_DIGEST_SIZE);
37869- memset (D, 0, WP512_DIGEST_SIZE);
37870+ memcpy(out, D, WP256_DIGEST_SIZE);
37871+ memzero_explicit(D, WP512_DIGEST_SIZE);
37872
37873 return 0;
37874 }
37875diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
37876index 6921c7f..78e1af7 100644
37877--- a/drivers/acpi/acpica/hwxfsleep.c
37878+++ b/drivers/acpi/acpica/hwxfsleep.c
37879@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
37880 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
37881
37882 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
37883- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37884- acpi_hw_extended_sleep},
37885- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37886- acpi_hw_extended_wake_prep},
37887- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
37888+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37889+ .extended_function = acpi_hw_extended_sleep},
37890+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37891+ .extended_function = acpi_hw_extended_wake_prep},
37892+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
37893+ .extended_function = acpi_hw_extended_wake}
37894 };
37895
37896 /*
37897diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
37898index 16129c7..8b675cd 100644
37899--- a/drivers/acpi/apei/apei-internal.h
37900+++ b/drivers/acpi/apei/apei-internal.h
37901@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
37902 struct apei_exec_ins_type {
37903 u32 flags;
37904 apei_exec_ins_func_t run;
37905-};
37906+} __do_const;
37907
37908 struct apei_exec_context {
37909 u32 ip;
37910diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
37911index fc5f780..e5ac91a 100644
37912--- a/drivers/acpi/apei/ghes.c
37913+++ b/drivers/acpi/apei/ghes.c
37914@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
37915 const struct acpi_hest_generic *generic,
37916 const struct acpi_hest_generic_status *estatus)
37917 {
37918- static atomic_t seqno;
37919+ static atomic_unchecked_t seqno;
37920 unsigned int curr_seqno;
37921 char pfx_seq[64];
37922
37923@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
37924 else
37925 pfx = KERN_ERR;
37926 }
37927- curr_seqno = atomic_inc_return(&seqno);
37928+ curr_seqno = atomic_inc_return_unchecked(&seqno);
37929 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
37930 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
37931 pfx_seq, generic->header.source_id);
37932diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
37933index a83e3c6..c3d617f 100644
37934--- a/drivers/acpi/bgrt.c
37935+++ b/drivers/acpi/bgrt.c
37936@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
37937 if (!bgrt_image)
37938 return -ENODEV;
37939
37940- bin_attr_image.private = bgrt_image;
37941- bin_attr_image.size = bgrt_image_size;
37942+ pax_open_kernel();
37943+ *(void **)&bin_attr_image.private = bgrt_image;
37944+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
37945+ pax_close_kernel();
37946
37947 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
37948 if (!bgrt_kobj)
37949diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
37950index 36eb42e..3b2f47e 100644
37951--- a/drivers/acpi/blacklist.c
37952+++ b/drivers/acpi/blacklist.c
37953@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
37954 u32 is_critical_error;
37955 };
37956
37957-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
37958+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
37959
37960 /*
37961 * POLICY: If *anything* doesn't work, put it on the blacklist.
37962@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
37963 return 0;
37964 }
37965
37966-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
37967+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
37968 {
37969 .callback = dmi_disable_osi_vista,
37970 .ident = "Fujitsu Siemens",
37971diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
37972index c68e724..e863008 100644
37973--- a/drivers/acpi/custom_method.c
37974+++ b/drivers/acpi/custom_method.c
37975@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37976 struct acpi_table_header table;
37977 acpi_status status;
37978
37979+#ifdef CONFIG_GRKERNSEC_KMEM
37980+ return -EPERM;
37981+#endif
37982+
37983 if (!(*ppos)) {
37984 /* parse the table header to get the table length */
37985 if (count <= sizeof(struct acpi_table_header))
37986diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37987index 17f9ec5..d9a455e 100644
37988--- a/drivers/acpi/processor_idle.c
37989+++ b/drivers/acpi/processor_idle.c
37990@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
37991 {
37992 int i, count = CPUIDLE_DRIVER_STATE_START;
37993 struct acpi_processor_cx *cx;
37994- struct cpuidle_state *state;
37995+ cpuidle_state_no_const *state;
37996 struct cpuidle_driver *drv = &acpi_idle_driver;
37997
37998 if (!pr->flags.power_setup_done)
37999diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
38000index 38cb978..352c761 100644
38001--- a/drivers/acpi/sysfs.c
38002+++ b/drivers/acpi/sysfs.c
38003@@ -423,11 +423,11 @@ static u32 num_counters;
38004 static struct attribute **all_attrs;
38005 static u32 acpi_gpe_count;
38006
38007-static struct attribute_group interrupt_stats_attr_group = {
38008+static attribute_group_no_const interrupt_stats_attr_group = {
38009 .name = "interrupts",
38010 };
38011
38012-static struct kobj_attribute *counter_attrs;
38013+static kobj_attribute_no_const *counter_attrs;
38014
38015 static void delete_gpe_attr_array(void)
38016 {
38017diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
38018index b784e9d..a69a049 100644
38019--- a/drivers/ata/libahci.c
38020+++ b/drivers/ata/libahci.c
38021@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
38022 }
38023 EXPORT_SYMBOL_GPL(ahci_kick_engine);
38024
38025-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38026+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
38027 struct ata_taskfile *tf, int is_cmd, u16 flags,
38028 unsigned long timeout_msec)
38029 {
38030diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
38031index 6f67490..f951ead 100644
38032--- a/drivers/ata/libata-core.c
38033+++ b/drivers/ata/libata-core.c
38034@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
38035 static void ata_dev_xfermask(struct ata_device *dev);
38036 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
38037
38038-atomic_t ata_print_id = ATOMIC_INIT(0);
38039+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
38040
38041 struct ata_force_param {
38042 const char *name;
38043@@ -4797,7 +4797,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
38044 struct ata_port *ap;
38045 unsigned int tag;
38046
38047- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38048+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38049 ap = qc->ap;
38050
38051 qc->flags = 0;
38052@@ -4813,7 +4813,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
38053 struct ata_port *ap;
38054 struct ata_link *link;
38055
38056- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38057+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
38058 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
38059 ap = qc->ap;
38060 link = qc->dev->link;
38061@@ -5917,6 +5917,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38062 return;
38063
38064 spin_lock(&lock);
38065+ pax_open_kernel();
38066
38067 for (cur = ops->inherits; cur; cur = cur->inherits) {
38068 void **inherit = (void **)cur;
38069@@ -5930,8 +5931,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
38070 if (IS_ERR(*pp))
38071 *pp = NULL;
38072
38073- ops->inherits = NULL;
38074+ *(struct ata_port_operations **)&ops->inherits = NULL;
38075
38076+ pax_close_kernel();
38077 spin_unlock(&lock);
38078 }
38079
38080@@ -6127,7 +6129,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
38081
38082 /* give ports names and add SCSI hosts */
38083 for (i = 0; i < host->n_ports; i++) {
38084- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
38085+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
38086 host->ports[i]->local_port_no = i + 1;
38087 }
38088
38089diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
38090index 0586f66..1a8f74a 100644
38091--- a/drivers/ata/libata-scsi.c
38092+++ b/drivers/ata/libata-scsi.c
38093@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
38094
38095 if (rc)
38096 return rc;
38097- ap->print_id = atomic_inc_return(&ata_print_id);
38098+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
38099 return 0;
38100 }
38101 EXPORT_SYMBOL_GPL(ata_sas_port_init);
38102diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
38103index 5f4e0cc..ff2c347 100644
38104--- a/drivers/ata/libata.h
38105+++ b/drivers/ata/libata.h
38106@@ -53,7 +53,7 @@ enum {
38107 ATA_DNXFER_QUIET = (1 << 31),
38108 };
38109
38110-extern atomic_t ata_print_id;
38111+extern atomic_unchecked_t ata_print_id;
38112 extern int atapi_passthru16;
38113 extern int libata_fua;
38114 extern int libata_noacpi;
38115diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
38116index 4edb1a8..84e1658 100644
38117--- a/drivers/ata/pata_arasan_cf.c
38118+++ b/drivers/ata/pata_arasan_cf.c
38119@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
38120 /* Handle platform specific quirks */
38121 if (quirk) {
38122 if (quirk & CF_BROKEN_PIO) {
38123- ap->ops->set_piomode = NULL;
38124+ pax_open_kernel();
38125+ *(void **)&ap->ops->set_piomode = NULL;
38126+ pax_close_kernel();
38127 ap->pio_mask = 0;
38128 }
38129 if (quirk & CF_BROKEN_MWDMA)
38130diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
38131index f9b983a..887b9d8 100644
38132--- a/drivers/atm/adummy.c
38133+++ b/drivers/atm/adummy.c
38134@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
38135 vcc->pop(vcc, skb);
38136 else
38137 dev_kfree_skb_any(skb);
38138- atomic_inc(&vcc->stats->tx);
38139+ atomic_inc_unchecked(&vcc->stats->tx);
38140
38141 return 0;
38142 }
38143diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
38144index f1a9198..f466a4a 100644
38145--- a/drivers/atm/ambassador.c
38146+++ b/drivers/atm/ambassador.c
38147@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
38148 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
38149
38150 // VC layer stats
38151- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38152+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38153
38154 // free the descriptor
38155 kfree (tx_descr);
38156@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38157 dump_skb ("<<<", vc, skb);
38158
38159 // VC layer stats
38160- atomic_inc(&atm_vcc->stats->rx);
38161+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38162 __net_timestamp(skb);
38163 // end of our responsibility
38164 atm_vcc->push (atm_vcc, skb);
38165@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
38166 } else {
38167 PRINTK (KERN_INFO, "dropped over-size frame");
38168 // should we count this?
38169- atomic_inc(&atm_vcc->stats->rx_drop);
38170+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38171 }
38172
38173 } else {
38174@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
38175 }
38176
38177 if (check_area (skb->data, skb->len)) {
38178- atomic_inc(&atm_vcc->stats->tx_err);
38179+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
38180 return -ENOMEM; // ?
38181 }
38182
38183diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
38184index 480fa6f..947067c 100644
38185--- a/drivers/atm/atmtcp.c
38186+++ b/drivers/atm/atmtcp.c
38187@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38188 if (vcc->pop) vcc->pop(vcc,skb);
38189 else dev_kfree_skb(skb);
38190 if (dev_data) return 0;
38191- atomic_inc(&vcc->stats->tx_err);
38192+ atomic_inc_unchecked(&vcc->stats->tx_err);
38193 return -ENOLINK;
38194 }
38195 size = skb->len+sizeof(struct atmtcp_hdr);
38196@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38197 if (!new_skb) {
38198 if (vcc->pop) vcc->pop(vcc,skb);
38199 else dev_kfree_skb(skb);
38200- atomic_inc(&vcc->stats->tx_err);
38201+ atomic_inc_unchecked(&vcc->stats->tx_err);
38202 return -ENOBUFS;
38203 }
38204 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
38205@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
38206 if (vcc->pop) vcc->pop(vcc,skb);
38207 else dev_kfree_skb(skb);
38208 out_vcc->push(out_vcc,new_skb);
38209- atomic_inc(&vcc->stats->tx);
38210- atomic_inc(&out_vcc->stats->rx);
38211+ atomic_inc_unchecked(&vcc->stats->tx);
38212+ atomic_inc_unchecked(&out_vcc->stats->rx);
38213 return 0;
38214 }
38215
38216@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38217 read_unlock(&vcc_sklist_lock);
38218 if (!out_vcc) {
38219 result = -EUNATCH;
38220- atomic_inc(&vcc->stats->tx_err);
38221+ atomic_inc_unchecked(&vcc->stats->tx_err);
38222 goto done;
38223 }
38224 skb_pull(skb,sizeof(struct atmtcp_hdr));
38225@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
38226 __net_timestamp(new_skb);
38227 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
38228 out_vcc->push(out_vcc,new_skb);
38229- atomic_inc(&vcc->stats->tx);
38230- atomic_inc(&out_vcc->stats->rx);
38231+ atomic_inc_unchecked(&vcc->stats->tx);
38232+ atomic_inc_unchecked(&out_vcc->stats->rx);
38233 done:
38234 if (vcc->pop) vcc->pop(vcc,skb);
38235 else dev_kfree_skb(skb);
38236diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
38237index d65975a..0b87e20 100644
38238--- a/drivers/atm/eni.c
38239+++ b/drivers/atm/eni.c
38240@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
38241 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
38242 vcc->dev->number);
38243 length = 0;
38244- atomic_inc(&vcc->stats->rx_err);
38245+ atomic_inc_unchecked(&vcc->stats->rx_err);
38246 }
38247 else {
38248 length = ATM_CELL_SIZE-1; /* no HEC */
38249@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38250 size);
38251 }
38252 eff = length = 0;
38253- atomic_inc(&vcc->stats->rx_err);
38254+ atomic_inc_unchecked(&vcc->stats->rx_err);
38255 }
38256 else {
38257 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
38258@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
38259 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
38260 vcc->dev->number,vcc->vci,length,size << 2,descr);
38261 length = eff = 0;
38262- atomic_inc(&vcc->stats->rx_err);
38263+ atomic_inc_unchecked(&vcc->stats->rx_err);
38264 }
38265 }
38266 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
38267@@ -767,7 +767,7 @@ rx_dequeued++;
38268 vcc->push(vcc,skb);
38269 pushed++;
38270 }
38271- atomic_inc(&vcc->stats->rx);
38272+ atomic_inc_unchecked(&vcc->stats->rx);
38273 }
38274 wake_up(&eni_dev->rx_wait);
38275 }
38276@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
38277 PCI_DMA_TODEVICE);
38278 if (vcc->pop) vcc->pop(vcc,skb);
38279 else dev_kfree_skb_irq(skb);
38280- atomic_inc(&vcc->stats->tx);
38281+ atomic_inc_unchecked(&vcc->stats->tx);
38282 wake_up(&eni_dev->tx_wait);
38283 dma_complete++;
38284 }
38285diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
38286index 82f2ae0..f205c02 100644
38287--- a/drivers/atm/firestream.c
38288+++ b/drivers/atm/firestream.c
38289@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
38290 }
38291 }
38292
38293- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38294+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38295
38296 fs_dprintk (FS_DEBUG_TXMEM, "i");
38297 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
38298@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38299 #endif
38300 skb_put (skb, qe->p1 & 0xffff);
38301 ATM_SKB(skb)->vcc = atm_vcc;
38302- atomic_inc(&atm_vcc->stats->rx);
38303+ atomic_inc_unchecked(&atm_vcc->stats->rx);
38304 __net_timestamp(skb);
38305 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
38306 atm_vcc->push (atm_vcc, skb);
38307@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
38308 kfree (pe);
38309 }
38310 if (atm_vcc)
38311- atomic_inc(&atm_vcc->stats->rx_drop);
38312+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38313 break;
38314 case 0x1f: /* Reassembly abort: no buffers. */
38315 /* Silently increment error counter. */
38316 if (atm_vcc)
38317- atomic_inc(&atm_vcc->stats->rx_drop);
38318+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
38319 break;
38320 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
38321 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
38322diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
38323index d4725fc..2d4ea65 100644
38324--- a/drivers/atm/fore200e.c
38325+++ b/drivers/atm/fore200e.c
38326@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
38327 #endif
38328 /* check error condition */
38329 if (*entry->status & STATUS_ERROR)
38330- atomic_inc(&vcc->stats->tx_err);
38331+ atomic_inc_unchecked(&vcc->stats->tx_err);
38332 else
38333- atomic_inc(&vcc->stats->tx);
38334+ atomic_inc_unchecked(&vcc->stats->tx);
38335 }
38336 }
38337
38338@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38339 if (skb == NULL) {
38340 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
38341
38342- atomic_inc(&vcc->stats->rx_drop);
38343+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38344 return -ENOMEM;
38345 }
38346
38347@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
38348
38349 dev_kfree_skb_any(skb);
38350
38351- atomic_inc(&vcc->stats->rx_drop);
38352+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38353 return -ENOMEM;
38354 }
38355
38356 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38357
38358 vcc->push(vcc, skb);
38359- atomic_inc(&vcc->stats->rx);
38360+ atomic_inc_unchecked(&vcc->stats->rx);
38361
38362 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
38363
38364@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
38365 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
38366 fore200e->atm_dev->number,
38367 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
38368- atomic_inc(&vcc->stats->rx_err);
38369+ atomic_inc_unchecked(&vcc->stats->rx_err);
38370 }
38371 }
38372
38373@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
38374 goto retry_here;
38375 }
38376
38377- atomic_inc(&vcc->stats->tx_err);
38378+ atomic_inc_unchecked(&vcc->stats->tx_err);
38379
38380 fore200e->tx_sat++;
38381 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
38382diff --git a/drivers/atm/he.c b/drivers/atm/he.c
38383index c39702b..785b73b 100644
38384--- a/drivers/atm/he.c
38385+++ b/drivers/atm/he.c
38386@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38387
38388 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
38389 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
38390- atomic_inc(&vcc->stats->rx_drop);
38391+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38392 goto return_host_buffers;
38393 }
38394
38395@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38396 RBRQ_LEN_ERR(he_dev->rbrq_head)
38397 ? "LEN_ERR" : "",
38398 vcc->vpi, vcc->vci);
38399- atomic_inc(&vcc->stats->rx_err);
38400+ atomic_inc_unchecked(&vcc->stats->rx_err);
38401 goto return_host_buffers;
38402 }
38403
38404@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
38405 vcc->push(vcc, skb);
38406 spin_lock(&he_dev->global_lock);
38407
38408- atomic_inc(&vcc->stats->rx);
38409+ atomic_inc_unchecked(&vcc->stats->rx);
38410
38411 return_host_buffers:
38412 ++pdus_assembled;
38413@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
38414 tpd->vcc->pop(tpd->vcc, tpd->skb);
38415 else
38416 dev_kfree_skb_any(tpd->skb);
38417- atomic_inc(&tpd->vcc->stats->tx_err);
38418+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
38419 }
38420 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
38421 return;
38422@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38423 vcc->pop(vcc, skb);
38424 else
38425 dev_kfree_skb_any(skb);
38426- atomic_inc(&vcc->stats->tx_err);
38427+ atomic_inc_unchecked(&vcc->stats->tx_err);
38428 return -EINVAL;
38429 }
38430
38431@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38432 vcc->pop(vcc, skb);
38433 else
38434 dev_kfree_skb_any(skb);
38435- atomic_inc(&vcc->stats->tx_err);
38436+ atomic_inc_unchecked(&vcc->stats->tx_err);
38437 return -EINVAL;
38438 }
38439 #endif
38440@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38441 vcc->pop(vcc, skb);
38442 else
38443 dev_kfree_skb_any(skb);
38444- atomic_inc(&vcc->stats->tx_err);
38445+ atomic_inc_unchecked(&vcc->stats->tx_err);
38446 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38447 return -ENOMEM;
38448 }
38449@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38450 vcc->pop(vcc, skb);
38451 else
38452 dev_kfree_skb_any(skb);
38453- atomic_inc(&vcc->stats->tx_err);
38454+ atomic_inc_unchecked(&vcc->stats->tx_err);
38455 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38456 return -ENOMEM;
38457 }
38458@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
38459 __enqueue_tpd(he_dev, tpd, cid);
38460 spin_unlock_irqrestore(&he_dev->global_lock, flags);
38461
38462- atomic_inc(&vcc->stats->tx);
38463+ atomic_inc_unchecked(&vcc->stats->tx);
38464
38465 return 0;
38466 }
38467diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
38468index 1dc0519..1aadaf7 100644
38469--- a/drivers/atm/horizon.c
38470+++ b/drivers/atm/horizon.c
38471@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
38472 {
38473 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
38474 // VC layer stats
38475- atomic_inc(&vcc->stats->rx);
38476+ atomic_inc_unchecked(&vcc->stats->rx);
38477 __net_timestamp(skb);
38478 // end of our responsibility
38479 vcc->push (vcc, skb);
38480@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
38481 dev->tx_iovec = NULL;
38482
38483 // VC layer stats
38484- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
38485+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
38486
38487 // free the skb
38488 hrz_kfree_skb (skb);
38489diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
38490index 2b24ed0..b3d6acc 100644
38491--- a/drivers/atm/idt77252.c
38492+++ b/drivers/atm/idt77252.c
38493@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
38494 else
38495 dev_kfree_skb(skb);
38496
38497- atomic_inc(&vcc->stats->tx);
38498+ atomic_inc_unchecked(&vcc->stats->tx);
38499 }
38500
38501 atomic_dec(&scq->used);
38502@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38503 if ((sb = dev_alloc_skb(64)) == NULL) {
38504 printk("%s: Can't allocate buffers for aal0.\n",
38505 card->name);
38506- atomic_add(i, &vcc->stats->rx_drop);
38507+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38508 break;
38509 }
38510 if (!atm_charge(vcc, sb->truesize)) {
38511 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
38512 card->name);
38513- atomic_add(i - 1, &vcc->stats->rx_drop);
38514+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
38515 dev_kfree_skb(sb);
38516 break;
38517 }
38518@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38519 ATM_SKB(sb)->vcc = vcc;
38520 __net_timestamp(sb);
38521 vcc->push(vcc, sb);
38522- atomic_inc(&vcc->stats->rx);
38523+ atomic_inc_unchecked(&vcc->stats->rx);
38524
38525 cell += ATM_CELL_PAYLOAD;
38526 }
38527@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38528 "(CDC: %08x)\n",
38529 card->name, len, rpp->len, readl(SAR_REG_CDC));
38530 recycle_rx_pool_skb(card, rpp);
38531- atomic_inc(&vcc->stats->rx_err);
38532+ atomic_inc_unchecked(&vcc->stats->rx_err);
38533 return;
38534 }
38535 if (stat & SAR_RSQE_CRC) {
38536 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
38537 recycle_rx_pool_skb(card, rpp);
38538- atomic_inc(&vcc->stats->rx_err);
38539+ atomic_inc_unchecked(&vcc->stats->rx_err);
38540 return;
38541 }
38542 if (skb_queue_len(&rpp->queue) > 1) {
38543@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38544 RXPRINTK("%s: Can't alloc RX skb.\n",
38545 card->name);
38546 recycle_rx_pool_skb(card, rpp);
38547- atomic_inc(&vcc->stats->rx_err);
38548+ atomic_inc_unchecked(&vcc->stats->rx_err);
38549 return;
38550 }
38551 if (!atm_charge(vcc, skb->truesize)) {
38552@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38553 __net_timestamp(skb);
38554
38555 vcc->push(vcc, skb);
38556- atomic_inc(&vcc->stats->rx);
38557+ atomic_inc_unchecked(&vcc->stats->rx);
38558
38559 return;
38560 }
38561@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
38562 __net_timestamp(skb);
38563
38564 vcc->push(vcc, skb);
38565- atomic_inc(&vcc->stats->rx);
38566+ atomic_inc_unchecked(&vcc->stats->rx);
38567
38568 if (skb->truesize > SAR_FB_SIZE_3)
38569 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
38570@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
38571 if (vcc->qos.aal != ATM_AAL0) {
38572 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
38573 card->name, vpi, vci);
38574- atomic_inc(&vcc->stats->rx_drop);
38575+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38576 goto drop;
38577 }
38578
38579 if ((sb = dev_alloc_skb(64)) == NULL) {
38580 printk("%s: Can't allocate buffers for AAL0.\n",
38581 card->name);
38582- atomic_inc(&vcc->stats->rx_err);
38583+ atomic_inc_unchecked(&vcc->stats->rx_err);
38584 goto drop;
38585 }
38586
38587@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
38588 ATM_SKB(sb)->vcc = vcc;
38589 __net_timestamp(sb);
38590 vcc->push(vcc, sb);
38591- atomic_inc(&vcc->stats->rx);
38592+ atomic_inc_unchecked(&vcc->stats->rx);
38593
38594 drop:
38595 skb_pull(queue, 64);
38596@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38597
38598 if (vc == NULL) {
38599 printk("%s: NULL connection in send().\n", card->name);
38600- atomic_inc(&vcc->stats->tx_err);
38601+ atomic_inc_unchecked(&vcc->stats->tx_err);
38602 dev_kfree_skb(skb);
38603 return -EINVAL;
38604 }
38605 if (!test_bit(VCF_TX, &vc->flags)) {
38606 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
38607- atomic_inc(&vcc->stats->tx_err);
38608+ atomic_inc_unchecked(&vcc->stats->tx_err);
38609 dev_kfree_skb(skb);
38610 return -EINVAL;
38611 }
38612@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38613 break;
38614 default:
38615 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
38616- atomic_inc(&vcc->stats->tx_err);
38617+ atomic_inc_unchecked(&vcc->stats->tx_err);
38618 dev_kfree_skb(skb);
38619 return -EINVAL;
38620 }
38621
38622 if (skb_shinfo(skb)->nr_frags != 0) {
38623 printk("%s: No scatter-gather yet.\n", card->name);
38624- atomic_inc(&vcc->stats->tx_err);
38625+ atomic_inc_unchecked(&vcc->stats->tx_err);
38626 dev_kfree_skb(skb);
38627 return -EINVAL;
38628 }
38629@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
38630
38631 err = queue_skb(card, vc, skb, oam);
38632 if (err) {
38633- atomic_inc(&vcc->stats->tx_err);
38634+ atomic_inc_unchecked(&vcc->stats->tx_err);
38635 dev_kfree_skb(skb);
38636 return err;
38637 }
38638@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
38639 skb = dev_alloc_skb(64);
38640 if (!skb) {
38641 printk("%s: Out of memory in send_oam().\n", card->name);
38642- atomic_inc(&vcc->stats->tx_err);
38643+ atomic_inc_unchecked(&vcc->stats->tx_err);
38644 return -ENOMEM;
38645 }
38646 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
38647diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
38648index 4217f29..88f547a 100644
38649--- a/drivers/atm/iphase.c
38650+++ b/drivers/atm/iphase.c
38651@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
38652 status = (u_short) (buf_desc_ptr->desc_mode);
38653 if (status & (RX_CER | RX_PTE | RX_OFL))
38654 {
38655- atomic_inc(&vcc->stats->rx_err);
38656+ atomic_inc_unchecked(&vcc->stats->rx_err);
38657 IF_ERR(printk("IA: bad packet, dropping it");)
38658 if (status & RX_CER) {
38659 IF_ERR(printk(" cause: packet CRC error\n");)
38660@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
38661 len = dma_addr - buf_addr;
38662 if (len > iadev->rx_buf_sz) {
38663 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
38664- atomic_inc(&vcc->stats->rx_err);
38665+ atomic_inc_unchecked(&vcc->stats->rx_err);
38666 goto out_free_desc;
38667 }
38668
38669@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38670 ia_vcc = INPH_IA_VCC(vcc);
38671 if (ia_vcc == NULL)
38672 {
38673- atomic_inc(&vcc->stats->rx_err);
38674+ atomic_inc_unchecked(&vcc->stats->rx_err);
38675 atm_return(vcc, skb->truesize);
38676 dev_kfree_skb_any(skb);
38677 goto INCR_DLE;
38678@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38679 if ((length > iadev->rx_buf_sz) || (length >
38680 (skb->len - sizeof(struct cpcs_trailer))))
38681 {
38682- atomic_inc(&vcc->stats->rx_err);
38683+ atomic_inc_unchecked(&vcc->stats->rx_err);
38684 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
38685 length, skb->len);)
38686 atm_return(vcc, skb->truesize);
38687@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
38688
38689 IF_RX(printk("rx_dle_intr: skb push");)
38690 vcc->push(vcc,skb);
38691- atomic_inc(&vcc->stats->rx);
38692+ atomic_inc_unchecked(&vcc->stats->rx);
38693 iadev->rx_pkt_cnt++;
38694 }
38695 INCR_DLE:
38696@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
38697 {
38698 struct k_sonet_stats *stats;
38699 stats = &PRIV(_ia_dev[board])->sonet_stats;
38700- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
38701- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
38702- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
38703- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
38704- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
38705- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
38706- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
38707- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
38708- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
38709+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
38710+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
38711+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
38712+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
38713+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
38714+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
38715+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
38716+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
38717+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
38718 }
38719 ia_cmds.status = 0;
38720 break;
38721@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38722 if ((desc == 0) || (desc > iadev->num_tx_desc))
38723 {
38724 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38725- atomic_inc(&vcc->stats->tx);
38726+ atomic_inc_unchecked(&vcc->stats->tx);
38727 if (vcc->pop)
38728 vcc->pop(vcc, skb);
38729 else
38730@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38731 ATM_DESC(skb) = vcc->vci;
38732 skb_queue_tail(&iadev->tx_dma_q, skb);
38733
38734- atomic_inc(&vcc->stats->tx);
38735+ atomic_inc_unchecked(&vcc->stats->tx);
38736 iadev->tx_pkt_cnt++;
38737 /* Increment transaction counter */
38738 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38739
38740 #if 0
38741 /* add flow control logic */
38742- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38743+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38744 if (iavcc->vc_desc_cnt > 10) {
38745 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38746 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38747diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38748index fa7d7019..1e404c7 100644
38749--- a/drivers/atm/lanai.c
38750+++ b/drivers/atm/lanai.c
38751@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38752 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38753 lanai_endtx(lanai, lvcc);
38754 lanai_free_skb(lvcc->tx.atmvcc, skb);
38755- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38756+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38757 }
38758
38759 /* Try to fill the buffer - don't call unless there is backlog */
38760@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38761 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38762 __net_timestamp(skb);
38763 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38764- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38765+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38766 out:
38767 lvcc->rx.buf.ptr = end;
38768 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38769@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38770 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38771 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38772 lanai->stats.service_rxnotaal5++;
38773- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38774+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38775 return 0;
38776 }
38777 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38778@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38779 int bytes;
38780 read_unlock(&vcc_sklist_lock);
38781 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38782- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38783+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38784 lvcc->stats.x.aal5.service_trash++;
38785 bytes = (SERVICE_GET_END(s) * 16) -
38786 (((unsigned long) lvcc->rx.buf.ptr) -
38787@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38788 }
38789 if (s & SERVICE_STREAM) {
38790 read_unlock(&vcc_sklist_lock);
38791- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38792+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38793 lvcc->stats.x.aal5.service_stream++;
38794 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38795 "PDU on VCI %d!\n", lanai->number, vci);
38796@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38797 return 0;
38798 }
38799 DPRINTK("got rx crc error on vci %d\n", vci);
38800- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38801+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38802 lvcc->stats.x.aal5.service_rxcrc++;
38803 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
38804 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
38805diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
38806index 9988ac9..7c52585 100644
38807--- a/drivers/atm/nicstar.c
38808+++ b/drivers/atm/nicstar.c
38809@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38810 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
38811 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
38812 card->index);
38813- atomic_inc(&vcc->stats->tx_err);
38814+ atomic_inc_unchecked(&vcc->stats->tx_err);
38815 dev_kfree_skb_any(skb);
38816 return -EINVAL;
38817 }
38818@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38819 if (!vc->tx) {
38820 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
38821 card->index);
38822- atomic_inc(&vcc->stats->tx_err);
38823+ atomic_inc_unchecked(&vcc->stats->tx_err);
38824 dev_kfree_skb_any(skb);
38825 return -EINVAL;
38826 }
38827@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38828 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
38829 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
38830 card->index);
38831- atomic_inc(&vcc->stats->tx_err);
38832+ atomic_inc_unchecked(&vcc->stats->tx_err);
38833 dev_kfree_skb_any(skb);
38834 return -EINVAL;
38835 }
38836
38837 if (skb_shinfo(skb)->nr_frags != 0) {
38838 printk("nicstar%d: No scatter-gather yet.\n", card->index);
38839- atomic_inc(&vcc->stats->tx_err);
38840+ atomic_inc_unchecked(&vcc->stats->tx_err);
38841 dev_kfree_skb_any(skb);
38842 return -EINVAL;
38843 }
38844@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38845 }
38846
38847 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
38848- atomic_inc(&vcc->stats->tx_err);
38849+ atomic_inc_unchecked(&vcc->stats->tx_err);
38850 dev_kfree_skb_any(skb);
38851 return -EIO;
38852 }
38853- atomic_inc(&vcc->stats->tx);
38854+ atomic_inc_unchecked(&vcc->stats->tx);
38855
38856 return 0;
38857 }
38858@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38859 printk
38860 ("nicstar%d: Can't allocate buffers for aal0.\n",
38861 card->index);
38862- atomic_add(i, &vcc->stats->rx_drop);
38863+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38864 break;
38865 }
38866 if (!atm_charge(vcc, sb->truesize)) {
38867 RXPRINTK
38868 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
38869 card->index);
38870- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38871+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38872 dev_kfree_skb_any(sb);
38873 break;
38874 }
38875@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38876 ATM_SKB(sb)->vcc = vcc;
38877 __net_timestamp(sb);
38878 vcc->push(vcc, sb);
38879- atomic_inc(&vcc->stats->rx);
38880+ atomic_inc_unchecked(&vcc->stats->rx);
38881 cell += ATM_CELL_PAYLOAD;
38882 }
38883
38884@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38885 if (iovb == NULL) {
38886 printk("nicstar%d: Out of iovec buffers.\n",
38887 card->index);
38888- atomic_inc(&vcc->stats->rx_drop);
38889+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38890 recycle_rx_buf(card, skb);
38891 return;
38892 }
38893@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38894 small or large buffer itself. */
38895 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
38896 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
38897- atomic_inc(&vcc->stats->rx_err);
38898+ atomic_inc_unchecked(&vcc->stats->rx_err);
38899 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38900 NS_MAX_IOVECS);
38901 NS_PRV_IOVCNT(iovb) = 0;
38902@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38903 ("nicstar%d: Expected a small buffer, and this is not one.\n",
38904 card->index);
38905 which_list(card, skb);
38906- atomic_inc(&vcc->stats->rx_err);
38907+ atomic_inc_unchecked(&vcc->stats->rx_err);
38908 recycle_rx_buf(card, skb);
38909 vc->rx_iov = NULL;
38910 recycle_iov_buf(card, iovb);
38911@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38912 ("nicstar%d: Expected a large buffer, and this is not one.\n",
38913 card->index);
38914 which_list(card, skb);
38915- atomic_inc(&vcc->stats->rx_err);
38916+ atomic_inc_unchecked(&vcc->stats->rx_err);
38917 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38918 NS_PRV_IOVCNT(iovb));
38919 vc->rx_iov = NULL;
38920@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38921 printk(" - PDU size mismatch.\n");
38922 else
38923 printk(".\n");
38924- atomic_inc(&vcc->stats->rx_err);
38925+ atomic_inc_unchecked(&vcc->stats->rx_err);
38926 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38927 NS_PRV_IOVCNT(iovb));
38928 vc->rx_iov = NULL;
38929@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38930 /* skb points to a small buffer */
38931 if (!atm_charge(vcc, skb->truesize)) {
38932 push_rxbufs(card, skb);
38933- atomic_inc(&vcc->stats->rx_drop);
38934+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38935 } else {
38936 skb_put(skb, len);
38937 dequeue_sm_buf(card, skb);
38938@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38939 ATM_SKB(skb)->vcc = vcc;
38940 __net_timestamp(skb);
38941 vcc->push(vcc, skb);
38942- atomic_inc(&vcc->stats->rx);
38943+ atomic_inc_unchecked(&vcc->stats->rx);
38944 }
38945 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
38946 struct sk_buff *sb;
38947@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38948 if (len <= NS_SMBUFSIZE) {
38949 if (!atm_charge(vcc, sb->truesize)) {
38950 push_rxbufs(card, sb);
38951- atomic_inc(&vcc->stats->rx_drop);
38952+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38953 } else {
38954 skb_put(sb, len);
38955 dequeue_sm_buf(card, sb);
38956@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38957 ATM_SKB(sb)->vcc = vcc;
38958 __net_timestamp(sb);
38959 vcc->push(vcc, sb);
38960- atomic_inc(&vcc->stats->rx);
38961+ atomic_inc_unchecked(&vcc->stats->rx);
38962 }
38963
38964 push_rxbufs(card, skb);
38965@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38966
38967 if (!atm_charge(vcc, skb->truesize)) {
38968 push_rxbufs(card, skb);
38969- atomic_inc(&vcc->stats->rx_drop);
38970+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38971 } else {
38972 dequeue_lg_buf(card, skb);
38973 #ifdef NS_USE_DESTRUCTORS
38974@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38975 ATM_SKB(skb)->vcc = vcc;
38976 __net_timestamp(skb);
38977 vcc->push(vcc, skb);
38978- atomic_inc(&vcc->stats->rx);
38979+ atomic_inc_unchecked(&vcc->stats->rx);
38980 }
38981
38982 push_rxbufs(card, sb);
38983@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38984 printk
38985 ("nicstar%d: Out of huge buffers.\n",
38986 card->index);
38987- atomic_inc(&vcc->stats->rx_drop);
38988+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38989 recycle_iovec_rx_bufs(card,
38990 (struct iovec *)
38991 iovb->data,
38992@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38993 card->hbpool.count++;
38994 } else
38995 dev_kfree_skb_any(hb);
38996- atomic_inc(&vcc->stats->rx_drop);
38997+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38998 } else {
38999 /* Copy the small buffer to the huge buffer */
39000 sb = (struct sk_buff *)iov->iov_base;
39001@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
39002 #endif /* NS_USE_DESTRUCTORS */
39003 __net_timestamp(hb);
39004 vcc->push(vcc, hb);
39005- atomic_inc(&vcc->stats->rx);
39006+ atomic_inc_unchecked(&vcc->stats->rx);
39007 }
39008 }
39009
39010diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
39011index 7652e8d..db45069 100644
39012--- a/drivers/atm/solos-pci.c
39013+++ b/drivers/atm/solos-pci.c
39014@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
39015 }
39016 atm_charge(vcc, skb->truesize);
39017 vcc->push(vcc, skb);
39018- atomic_inc(&vcc->stats->rx);
39019+ atomic_inc_unchecked(&vcc->stats->rx);
39020 break;
39021
39022 case PKT_STATUS:
39023@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
39024 vcc = SKB_CB(oldskb)->vcc;
39025
39026 if (vcc) {
39027- atomic_inc(&vcc->stats->tx);
39028+ atomic_inc_unchecked(&vcc->stats->tx);
39029 solos_pop(vcc, oldskb);
39030 } else {
39031 dev_kfree_skb_irq(oldskb);
39032diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
39033index 0215934..ce9f5b1 100644
39034--- a/drivers/atm/suni.c
39035+++ b/drivers/atm/suni.c
39036@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
39037
39038
39039 #define ADD_LIMITED(s,v) \
39040- atomic_add((v),&stats->s); \
39041- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
39042+ atomic_add_unchecked((v),&stats->s); \
39043+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
39044
39045
39046 static void suni_hz(unsigned long from_timer)
39047diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
39048index 5120a96..e2572bd 100644
39049--- a/drivers/atm/uPD98402.c
39050+++ b/drivers/atm/uPD98402.c
39051@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
39052 struct sonet_stats tmp;
39053 int error = 0;
39054
39055- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39056+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
39057 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
39058 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
39059 if (zero && !error) {
39060@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
39061
39062
39063 #define ADD_LIMITED(s,v) \
39064- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
39065- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
39066- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39067+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
39068+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
39069+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
39070
39071
39072 static void stat_event(struct atm_dev *dev)
39073@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
39074 if (reason & uPD98402_INT_PFM) stat_event(dev);
39075 if (reason & uPD98402_INT_PCO) {
39076 (void) GET(PCOCR); /* clear interrupt cause */
39077- atomic_add(GET(HECCT),
39078+ atomic_add_unchecked(GET(HECCT),
39079 &PRIV(dev)->sonet_stats.uncorr_hcs);
39080 }
39081 if ((reason & uPD98402_INT_RFO) &&
39082@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
39083 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
39084 uPD98402_INT_LOS),PIMR); /* enable them */
39085 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
39086- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39087- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
39088- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
39089+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
39090+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
39091+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
39092 return 0;
39093 }
39094
39095diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
39096index 969c3c2..9b72956 100644
39097--- a/drivers/atm/zatm.c
39098+++ b/drivers/atm/zatm.c
39099@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39100 }
39101 if (!size) {
39102 dev_kfree_skb_irq(skb);
39103- if (vcc) atomic_inc(&vcc->stats->rx_err);
39104+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
39105 continue;
39106 }
39107 if (!atm_charge(vcc,skb->truesize)) {
39108@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
39109 skb->len = size;
39110 ATM_SKB(skb)->vcc = vcc;
39111 vcc->push(vcc,skb);
39112- atomic_inc(&vcc->stats->rx);
39113+ atomic_inc_unchecked(&vcc->stats->rx);
39114 }
39115 zout(pos & 0xffff,MTA(mbx));
39116 #if 0 /* probably a stupid idea */
39117@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
39118 skb_queue_head(&zatm_vcc->backlog,skb);
39119 break;
39120 }
39121- atomic_inc(&vcc->stats->tx);
39122+ atomic_inc_unchecked(&vcc->stats->tx);
39123 wake_up(&zatm_vcc->tx_wait);
39124 }
39125
39126diff --git a/drivers/base/bus.c b/drivers/base/bus.c
39127index 83e910a..b224a73 100644
39128--- a/drivers/base/bus.c
39129+++ b/drivers/base/bus.c
39130@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
39131 return -EINVAL;
39132
39133 mutex_lock(&subsys->p->mutex);
39134- list_add_tail(&sif->node, &subsys->p->interfaces);
39135+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
39136 if (sif->add_dev) {
39137 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39138 while ((dev = subsys_dev_iter_next(&iter)))
39139@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
39140 subsys = sif->subsys;
39141
39142 mutex_lock(&subsys->p->mutex);
39143- list_del_init(&sif->node);
39144+ pax_list_del_init((struct list_head *)&sif->node);
39145 if (sif->remove_dev) {
39146 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
39147 while ((dev = subsys_dev_iter_next(&iter)))
39148diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
39149index 25798db..15f130e 100644
39150--- a/drivers/base/devtmpfs.c
39151+++ b/drivers/base/devtmpfs.c
39152@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
39153 if (!thread)
39154 return 0;
39155
39156- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
39157+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
39158 if (err)
39159 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
39160 else
39161@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
39162 *err = sys_unshare(CLONE_NEWNS);
39163 if (*err)
39164 goto out;
39165- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
39166+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
39167 if (*err)
39168 goto out;
39169- sys_chdir("/.."); /* will traverse into overmounted root */
39170- sys_chroot(".");
39171+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
39172+ sys_chroot((char __force_user *)".");
39173 complete(&setup_done);
39174 while (1) {
39175 spin_lock(&req_lock);
39176diff --git a/drivers/base/node.c b/drivers/base/node.c
39177index d51c49c..28908df 100644
39178--- a/drivers/base/node.c
39179+++ b/drivers/base/node.c
39180@@ -623,7 +623,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
39181 struct node_attr {
39182 struct device_attribute attr;
39183 enum node_states state;
39184-};
39185+} __do_const;
39186
39187 static ssize_t show_node_state(struct device *dev,
39188 struct device_attribute *attr, char *buf)
39189diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
39190index eee55c1..b8c9393 100644
39191--- a/drivers/base/power/domain.c
39192+++ b/drivers/base/power/domain.c
39193@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
39194
39195 if (dev->power.subsys_data->domain_data) {
39196 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
39197- gpd_data->ops = (struct gpd_dev_ops){ NULL };
39198+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
39199 if (clear_td)
39200- gpd_data->td = (struct gpd_timing_data){ 0 };
39201+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
39202
39203 if (--gpd_data->refcount == 0) {
39204 dev->power.subsys_data->domain_data = NULL;
39205@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
39206 {
39207 struct cpuidle_driver *cpuidle_drv;
39208 struct gpd_cpu_data *cpu_data;
39209- struct cpuidle_state *idle_state;
39210+ cpuidle_state_no_const *idle_state;
39211 int ret = 0;
39212
39213 if (IS_ERR_OR_NULL(genpd) || state < 0)
39214@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
39215 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
39216 {
39217 struct gpd_cpu_data *cpu_data;
39218- struct cpuidle_state *idle_state;
39219+ cpuidle_state_no_const *idle_state;
39220 int ret = 0;
39221
39222 if (IS_ERR_OR_NULL(genpd))
39223diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
39224index 95b181d1..c4f0e19 100644
39225--- a/drivers/base/power/sysfs.c
39226+++ b/drivers/base/power/sysfs.c
39227@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
39228 return -EIO;
39229 }
39230 }
39231- return sprintf(buf, p);
39232+ return sprintf(buf, "%s", p);
39233 }
39234
39235 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
39236diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
39237index eb1bd2e..2667d3a 100644
39238--- a/drivers/base/power/wakeup.c
39239+++ b/drivers/base/power/wakeup.c
39240@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
39241 * They need to be modified together atomically, so it's better to use one
39242 * atomic variable to hold them both.
39243 */
39244-static atomic_t combined_event_count = ATOMIC_INIT(0);
39245+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
39246
39247 #define IN_PROGRESS_BITS (sizeof(int) * 4)
39248 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
39249
39250 static void split_counters(unsigned int *cnt, unsigned int *inpr)
39251 {
39252- unsigned int comb = atomic_read(&combined_event_count);
39253+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
39254
39255 *cnt = (comb >> IN_PROGRESS_BITS);
39256 *inpr = comb & MAX_IN_PROGRESS;
39257@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
39258 ws->start_prevent_time = ws->last_time;
39259
39260 /* Increment the counter of events in progress. */
39261- cec = atomic_inc_return(&combined_event_count);
39262+ cec = atomic_inc_return_unchecked(&combined_event_count);
39263
39264 trace_wakeup_source_activate(ws->name, cec);
39265 }
39266@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
39267 * Increment the counter of registered wakeup events and decrement the
39268 * couter of wakeup events in progress simultaneously.
39269 */
39270- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
39271+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
39272 trace_wakeup_source_deactivate(ws->name, cec);
39273
39274 split_counters(&cnt, &inpr);
39275diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
39276index dbb8350..4762f4c 100644
39277--- a/drivers/base/syscore.c
39278+++ b/drivers/base/syscore.c
39279@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
39280 void register_syscore_ops(struct syscore_ops *ops)
39281 {
39282 mutex_lock(&syscore_ops_lock);
39283- list_add_tail(&ops->node, &syscore_ops_list);
39284+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
39285 mutex_unlock(&syscore_ops_lock);
39286 }
39287 EXPORT_SYMBOL_GPL(register_syscore_ops);
39288@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
39289 void unregister_syscore_ops(struct syscore_ops *ops)
39290 {
39291 mutex_lock(&syscore_ops_lock);
39292- list_del(&ops->node);
39293+ pax_list_del((struct list_head *)&ops->node);
39294 mutex_unlock(&syscore_ops_lock);
39295 }
39296 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
39297diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
39298index ff20f19..018f1da 100644
39299--- a/drivers/block/cciss.c
39300+++ b/drivers/block/cciss.c
39301@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
39302 while (!list_empty(&h->reqQ)) {
39303 c = list_entry(h->reqQ.next, CommandList_struct, list);
39304 /* can't do anything if fifo is full */
39305- if ((h->access.fifo_full(h))) {
39306+ if ((h->access->fifo_full(h))) {
39307 dev_warn(&h->pdev->dev, "fifo full\n");
39308 break;
39309 }
39310@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
39311 h->Qdepth--;
39312
39313 /* Tell the controller execute command */
39314- h->access.submit_command(h, c);
39315+ h->access->submit_command(h, c);
39316
39317 /* Put job onto the completed Q */
39318 addQ(&h->cmpQ, c);
39319@@ -3444,17 +3444,17 @@ startio:
39320
39321 static inline unsigned long get_next_completion(ctlr_info_t *h)
39322 {
39323- return h->access.command_completed(h);
39324+ return h->access->command_completed(h);
39325 }
39326
39327 static inline int interrupt_pending(ctlr_info_t *h)
39328 {
39329- return h->access.intr_pending(h);
39330+ return h->access->intr_pending(h);
39331 }
39332
39333 static inline long interrupt_not_for_us(ctlr_info_t *h)
39334 {
39335- return ((h->access.intr_pending(h) == 0) ||
39336+ return ((h->access->intr_pending(h) == 0) ||
39337 (h->interrupts_enabled == 0));
39338 }
39339
39340@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
39341 u32 a;
39342
39343 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
39344- return h->access.command_completed(h);
39345+ return h->access->command_completed(h);
39346
39347 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
39348 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
39349@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
39350 trans_support & CFGTBL_Trans_use_short_tags);
39351
39352 /* Change the access methods to the performant access methods */
39353- h->access = SA5_performant_access;
39354+ h->access = &SA5_performant_access;
39355 h->transMethod = CFGTBL_Trans_Performant;
39356
39357 return;
39358@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
39359 if (prod_index < 0)
39360 return -ENODEV;
39361 h->product_name = products[prod_index].product_name;
39362- h->access = *(products[prod_index].access);
39363+ h->access = products[prod_index].access;
39364
39365 if (cciss_board_disabled(h)) {
39366 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
39367@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
39368 }
39369
39370 /* make sure the board interrupts are off */
39371- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39372+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39373 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
39374 if (rc)
39375 goto clean2;
39376@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
39377 * fake ones to scoop up any residual completions.
39378 */
39379 spin_lock_irqsave(&h->lock, flags);
39380- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39381+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39382 spin_unlock_irqrestore(&h->lock, flags);
39383 free_irq(h->intr[h->intr_mode], h);
39384 rc = cciss_request_irq(h, cciss_msix_discard_completions,
39385@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
39386 dev_info(&h->pdev->dev, "Board READY.\n");
39387 dev_info(&h->pdev->dev,
39388 "Waiting for stale completions to drain.\n");
39389- h->access.set_intr_mask(h, CCISS_INTR_ON);
39390+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39391 msleep(10000);
39392- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39393+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39394
39395 rc = controller_reset_failed(h->cfgtable);
39396 if (rc)
39397@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
39398 cciss_scsi_setup(h);
39399
39400 /* Turn the interrupts on so we can service requests */
39401- h->access.set_intr_mask(h, CCISS_INTR_ON);
39402+ h->access->set_intr_mask(h, CCISS_INTR_ON);
39403
39404 /* Get the firmware version */
39405 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
39406@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
39407 kfree(flush_buf);
39408 if (return_code != IO_OK)
39409 dev_warn(&h->pdev->dev, "Error flushing cache\n");
39410- h->access.set_intr_mask(h, CCISS_INTR_OFF);
39411+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
39412 free_irq(h->intr[h->intr_mode], h);
39413 }
39414
39415diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
39416index 7fda30e..2f27946 100644
39417--- a/drivers/block/cciss.h
39418+++ b/drivers/block/cciss.h
39419@@ -101,7 +101,7 @@ struct ctlr_info
39420 /* information about each logical volume */
39421 drive_info_struct *drv[CISS_MAX_LUN];
39422
39423- struct access_method access;
39424+ struct access_method *access;
39425
39426 /* queue and queue Info */
39427 struct list_head reqQ;
39428@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
39429 }
39430
39431 static struct access_method SA5_access = {
39432- SA5_submit_command,
39433- SA5_intr_mask,
39434- SA5_fifo_full,
39435- SA5_intr_pending,
39436- SA5_completed,
39437+ .submit_command = SA5_submit_command,
39438+ .set_intr_mask = SA5_intr_mask,
39439+ .fifo_full = SA5_fifo_full,
39440+ .intr_pending = SA5_intr_pending,
39441+ .command_completed = SA5_completed,
39442 };
39443
39444 static struct access_method SA5B_access = {
39445- SA5_submit_command,
39446- SA5B_intr_mask,
39447- SA5_fifo_full,
39448- SA5B_intr_pending,
39449- SA5_completed,
39450+ .submit_command = SA5_submit_command,
39451+ .set_intr_mask = SA5B_intr_mask,
39452+ .fifo_full = SA5_fifo_full,
39453+ .intr_pending = SA5B_intr_pending,
39454+ .command_completed = SA5_completed,
39455 };
39456
39457 static struct access_method SA5_performant_access = {
39458- SA5_submit_command,
39459- SA5_performant_intr_mask,
39460- SA5_fifo_full,
39461- SA5_performant_intr_pending,
39462- SA5_performant_completed,
39463+ .submit_command = SA5_submit_command,
39464+ .set_intr_mask = SA5_performant_intr_mask,
39465+ .fifo_full = SA5_fifo_full,
39466+ .intr_pending = SA5_performant_intr_pending,
39467+ .command_completed = SA5_performant_completed,
39468 };
39469
39470 struct board_type {
39471diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
39472index 2b94403..fd6ad1f 100644
39473--- a/drivers/block/cpqarray.c
39474+++ b/drivers/block/cpqarray.c
39475@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39476 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
39477 goto Enomem4;
39478 }
39479- hba[i]->access.set_intr_mask(hba[i], 0);
39480+ hba[i]->access->set_intr_mask(hba[i], 0);
39481 if (request_irq(hba[i]->intr, do_ida_intr,
39482 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
39483 {
39484@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
39485 add_timer(&hba[i]->timer);
39486
39487 /* Enable IRQ now that spinlock and rate limit timer are set up */
39488- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39489+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
39490
39491 for(j=0; j<NWD; j++) {
39492 struct gendisk *disk = ida_gendisk[i][j];
39493@@ -694,7 +694,7 @@ DBGINFO(
39494 for(i=0; i<NR_PRODUCTS; i++) {
39495 if (board_id == products[i].board_id) {
39496 c->product_name = products[i].product_name;
39497- c->access = *(products[i].access);
39498+ c->access = products[i].access;
39499 break;
39500 }
39501 }
39502@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
39503 hba[ctlr]->intr = intr;
39504 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
39505 hba[ctlr]->product_name = products[j].product_name;
39506- hba[ctlr]->access = *(products[j].access);
39507+ hba[ctlr]->access = products[j].access;
39508 hba[ctlr]->ctlr = ctlr;
39509 hba[ctlr]->board_id = board_id;
39510 hba[ctlr]->pci_dev = NULL; /* not PCI */
39511@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
39512
39513 while((c = h->reqQ) != NULL) {
39514 /* Can't do anything if we're busy */
39515- if (h->access.fifo_full(h) == 0)
39516+ if (h->access->fifo_full(h) == 0)
39517 return;
39518
39519 /* Get the first entry from the request Q */
39520@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
39521 h->Qdepth--;
39522
39523 /* Tell the controller to do our bidding */
39524- h->access.submit_command(h, c);
39525+ h->access->submit_command(h, c);
39526
39527 /* Get onto the completion Q */
39528 addQ(&h->cmpQ, c);
39529@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39530 unsigned long flags;
39531 __u32 a,a1;
39532
39533- istat = h->access.intr_pending(h);
39534+ istat = h->access->intr_pending(h);
39535 /* Is this interrupt for us? */
39536 if (istat == 0)
39537 return IRQ_NONE;
39538@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
39539 */
39540 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
39541 if (istat & FIFO_NOT_EMPTY) {
39542- while((a = h->access.command_completed(h))) {
39543+ while((a = h->access->command_completed(h))) {
39544 a1 = a; a &= ~3;
39545 if ((c = h->cmpQ) == NULL)
39546 {
39547@@ -1448,11 +1448,11 @@ static int sendcmd(
39548 /*
39549 * Disable interrupt
39550 */
39551- info_p->access.set_intr_mask(info_p, 0);
39552+ info_p->access->set_intr_mask(info_p, 0);
39553 /* Make sure there is room in the command FIFO */
39554 /* Actually it should be completely empty at this time. */
39555 for (i = 200000; i > 0; i--) {
39556- temp = info_p->access.fifo_full(info_p);
39557+ temp = info_p->access->fifo_full(info_p);
39558 if (temp != 0) {
39559 break;
39560 }
39561@@ -1465,7 +1465,7 @@ DBG(
39562 /*
39563 * Send the cmd
39564 */
39565- info_p->access.submit_command(info_p, c);
39566+ info_p->access->submit_command(info_p, c);
39567 complete = pollcomplete(ctlr);
39568
39569 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
39570@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
39571 * we check the new geometry. Then turn interrupts back on when
39572 * we're done.
39573 */
39574- host->access.set_intr_mask(host, 0);
39575+ host->access->set_intr_mask(host, 0);
39576 getgeometry(ctlr);
39577- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
39578+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
39579
39580 for(i=0; i<NWD; i++) {
39581 struct gendisk *disk = ida_gendisk[ctlr][i];
39582@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
39583 /* Wait (up to 2 seconds) for a command to complete */
39584
39585 for (i = 200000; i > 0; i--) {
39586- done = hba[ctlr]->access.command_completed(hba[ctlr]);
39587+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
39588 if (done == 0) {
39589 udelay(10); /* a short fixed delay */
39590 } else
39591diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
39592index be73e9d..7fbf140 100644
39593--- a/drivers/block/cpqarray.h
39594+++ b/drivers/block/cpqarray.h
39595@@ -99,7 +99,7 @@ struct ctlr_info {
39596 drv_info_t drv[NWD];
39597 struct proc_dir_entry *proc;
39598
39599- struct access_method access;
39600+ struct access_method *access;
39601
39602 cmdlist_t *reqQ;
39603 cmdlist_t *cmpQ;
39604diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
39605index 426c97a..8c58607 100644
39606--- a/drivers/block/drbd/drbd_bitmap.c
39607+++ b/drivers/block/drbd/drbd_bitmap.c
39608@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
39609 submit_bio(rw, bio);
39610 /* this should not count as user activity and cause the
39611 * resync to throttle -- see drbd_rs_should_slow_down(). */
39612- atomic_add(len >> 9, &device->rs_sect_ev);
39613+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
39614 }
39615 }
39616
39617diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
39618index 1a00001..c0d4253 100644
39619--- a/drivers/block/drbd/drbd_int.h
39620+++ b/drivers/block/drbd/drbd_int.h
39621@@ -387,7 +387,7 @@ struct drbd_epoch {
39622 struct drbd_connection *connection;
39623 struct list_head list;
39624 unsigned int barrier_nr;
39625- atomic_t epoch_size; /* increased on every request added. */
39626+ atomic_unchecked_t epoch_size; /* increased on every request added. */
39627 atomic_t active; /* increased on every req. added, and dec on every finished. */
39628 unsigned long flags;
39629 };
39630@@ -948,7 +948,7 @@ struct drbd_device {
39631 unsigned int al_tr_number;
39632 int al_tr_cycle;
39633 wait_queue_head_t seq_wait;
39634- atomic_t packet_seq;
39635+ atomic_unchecked_t packet_seq;
39636 unsigned int peer_seq;
39637 spinlock_t peer_seq_lock;
39638 unsigned long comm_bm_set; /* communicated number of set bits. */
39639@@ -957,8 +957,8 @@ struct drbd_device {
39640 struct mutex own_state_mutex;
39641 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
39642 char congestion_reason; /* Why we where congested... */
39643- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
39644- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
39645+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
39646+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
39647 int rs_last_sect_ev; /* counter to compare with */
39648 int rs_last_events; /* counter of read or write "events" (unit sectors)
39649 * on the lower level device when we last looked. */
39650@@ -1569,7 +1569,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
39651 char __user *uoptval;
39652 int err;
39653
39654- uoptval = (char __user __force *)optval;
39655+ uoptval = (char __force_user *)optval;
39656
39657 set_fs(KERNEL_DS);
39658 if (level == SOL_SOCKET)
39659diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
39660index 89c497c..9c736ae 100644
39661--- a/drivers/block/drbd/drbd_interval.c
39662+++ b/drivers/block/drbd/drbd_interval.c
39663@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
39664 }
39665
39666 static const struct rb_augment_callbacks augment_callbacks = {
39667- augment_propagate,
39668- augment_copy,
39669- augment_rotate,
39670+ .propagate = augment_propagate,
39671+ .copy = augment_copy,
39672+ .rotate = augment_rotate,
39673 };
39674
39675 /**
39676diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
39677index 9b465bb..00034ecf 100644
39678--- a/drivers/block/drbd/drbd_main.c
39679+++ b/drivers/block/drbd/drbd_main.c
39680@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
39681 p->sector = sector;
39682 p->block_id = block_id;
39683 p->blksize = blksize;
39684- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
39685+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
39686 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
39687 }
39688
39689@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
39690 return -EIO;
39691 p->sector = cpu_to_be64(req->i.sector);
39692 p->block_id = (unsigned long)req;
39693- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
39694+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
39695 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
39696 if (device->state.conn >= C_SYNC_SOURCE &&
39697 device->state.conn <= C_PAUSED_SYNC_T)
39698@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
39699 atomic_set(&device->unacked_cnt, 0);
39700 atomic_set(&device->local_cnt, 0);
39701 atomic_set(&device->pp_in_use_by_net, 0);
39702- atomic_set(&device->rs_sect_in, 0);
39703- atomic_set(&device->rs_sect_ev, 0);
39704+ atomic_set_unchecked(&device->rs_sect_in, 0);
39705+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39706 atomic_set(&device->ap_in_flight, 0);
39707 atomic_set(&device->md_io.in_use, 0);
39708
39709@@ -2688,8 +2688,8 @@ void drbd_destroy_connection(struct kref *kref)
39710 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
39711 struct drbd_resource *resource = connection->resource;
39712
39713- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
39714- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
39715+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
39716+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
39717 kfree(connection->current_epoch);
39718
39719 idr_destroy(&connection->peer_devices);
39720diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
39721index 1cd47df..57c53c0 100644
39722--- a/drivers/block/drbd/drbd_nl.c
39723+++ b/drivers/block/drbd/drbd_nl.c
39724@@ -3645,13 +3645,13 @@ finish:
39725
39726 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39727 {
39728- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39729+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39730 struct sk_buff *msg;
39731 struct drbd_genlmsghdr *d_out;
39732 unsigned seq;
39733 int err = -ENOMEM;
39734
39735- seq = atomic_inc_return(&drbd_genl_seq);
39736+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39737 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39738 if (!msg)
39739 goto failed;
39740diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39741index 9342b8d..b6a6825 100644
39742--- a/drivers/block/drbd/drbd_receiver.c
39743+++ b/drivers/block/drbd/drbd_receiver.c
39744@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39745 struct drbd_device *device = peer_device->device;
39746 int err;
39747
39748- atomic_set(&device->packet_seq, 0);
39749+ atomic_set_unchecked(&device->packet_seq, 0);
39750 device->peer_seq = 0;
39751
39752 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39753@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39754 do {
39755 next_epoch = NULL;
39756
39757- epoch_size = atomic_read(&epoch->epoch_size);
39758+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39759
39760 switch (ev & ~EV_CLEANUP) {
39761 case EV_PUT:
39762@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39763 rv = FE_DESTROYED;
39764 } else {
39765 epoch->flags = 0;
39766- atomic_set(&epoch->epoch_size, 0);
39767+ atomic_set_unchecked(&epoch->epoch_size, 0);
39768 /* atomic_set(&epoch->active, 0); is already zero */
39769 if (rv == FE_STILL_LIVE)
39770 rv = FE_RECYCLED;
39771@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39772 conn_wait_active_ee_empty(connection);
39773 drbd_flush(connection);
39774
39775- if (atomic_read(&connection->current_epoch->epoch_size)) {
39776+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39777 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39778 if (epoch)
39779 break;
39780@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39781 }
39782
39783 epoch->flags = 0;
39784- atomic_set(&epoch->epoch_size, 0);
39785+ atomic_set_unchecked(&epoch->epoch_size, 0);
39786 atomic_set(&epoch->active, 0);
39787
39788 spin_lock(&connection->epoch_lock);
39789- if (atomic_read(&connection->current_epoch->epoch_size)) {
39790+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39791 list_add(&epoch->list, &connection->current_epoch->list);
39792 connection->current_epoch = epoch;
39793 connection->epochs++;
39794@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
39795 list_add_tail(&peer_req->w.list, &device->sync_ee);
39796 spin_unlock_irq(&device->resource->req_lock);
39797
39798- atomic_add(pi->size >> 9, &device->rs_sect_ev);
39799+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
39800 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
39801 return 0;
39802
39803@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
39804 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39805 }
39806
39807- atomic_add(pi->size >> 9, &device->rs_sect_in);
39808+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
39809
39810 return err;
39811 }
39812@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39813
39814 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39815 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39816- atomic_inc(&connection->current_epoch->epoch_size);
39817+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39818 err2 = drbd_drain_block(peer_device, pi->size);
39819 if (!err)
39820 err = err2;
39821@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39822
39823 spin_lock(&connection->epoch_lock);
39824 peer_req->epoch = connection->current_epoch;
39825- atomic_inc(&peer_req->epoch->epoch_size);
39826+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39827 atomic_inc(&peer_req->epoch->active);
39828 spin_unlock(&connection->epoch_lock);
39829
39830@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
39831
39832 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
39833 (int)part_stat_read(&disk->part0, sectors[1]) -
39834- atomic_read(&device->rs_sect_ev);
39835+ atomic_read_unchecked(&device->rs_sect_ev);
39836
39837 if (atomic_read(&device->ap_actlog_cnt)
39838 || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
39839@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39840 device->use_csums = true;
39841 } else if (pi->cmd == P_OV_REPLY) {
39842 /* track progress, we may need to throttle */
39843- atomic_add(size >> 9, &device->rs_sect_in);
39844+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
39845 peer_req->w.cb = w_e_end_ov_reply;
39846 dec_rs_pending(device);
39847 /* drbd_rs_begin_io done when we sent this request,
39848@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39849 goto out_free_e;
39850
39851 submit_for_resync:
39852- atomic_add(size >> 9, &device->rs_sect_ev);
39853+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39854
39855 submit:
39856 update_receiver_timing_details(connection, drbd_submit_peer_request);
39857@@ -4564,7 +4564,7 @@ struct data_cmd {
39858 int expect_payload;
39859 size_t pkt_size;
39860 int (*fn)(struct drbd_connection *, struct packet_info *);
39861-};
39862+} __do_const;
39863
39864 static struct data_cmd drbd_cmd_handler[] = {
39865 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39866@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39867 if (!list_empty(&connection->current_epoch->list))
39868 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39869 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39870- atomic_set(&connection->current_epoch->epoch_size, 0);
39871+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39872 connection->send.seen_any_write_yet = false;
39873
39874 drbd_info(connection, "Connection closed\n");
39875@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
39876 put_ldev(device);
39877 }
39878 dec_rs_pending(device);
39879- atomic_add(blksize >> 9, &device->rs_sect_in);
39880+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
39881
39882 return 0;
39883 }
39884@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39885 struct asender_cmd {
39886 size_t pkt_size;
39887 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39888-};
39889+} __do_const;
39890
39891 static struct asender_cmd asender_tbl[] = {
39892 [P_PING] = { 0, got_Ping },
39893diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
39894index 50776b3..1477c3f 100644
39895--- a/drivers/block/drbd/drbd_worker.c
39896+++ b/drivers/block/drbd/drbd_worker.c
39897@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
39898 list_add_tail(&peer_req->w.list, &device->read_ee);
39899 spin_unlock_irq(&device->resource->req_lock);
39900
39901- atomic_add(size >> 9, &device->rs_sect_ev);
39902+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39903 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
39904 return 0;
39905
39906@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
39907 unsigned int sect_in; /* Number of sectors that came in since the last turn */
39908 int number, mxb;
39909
39910- sect_in = atomic_xchg(&device->rs_sect_in, 0);
39911+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
39912 device->rs_in_flight -= sect_in;
39913
39914 rcu_read_lock();
39915@@ -1594,8 +1594,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
39916 {
39917 struct fifo_buffer *plan;
39918
39919- atomic_set(&device->rs_sect_in, 0);
39920- atomic_set(&device->rs_sect_ev, 0);
39921+ atomic_set_unchecked(&device->rs_sect_in, 0);
39922+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39923 device->rs_in_flight = 0;
39924
39925 /* Updating the RCU protected object in place is necessary since
39926diff --git a/drivers/block/loop.c b/drivers/block/loop.c
39927index 6cb1beb..bf490f7 100644
39928--- a/drivers/block/loop.c
39929+++ b/drivers/block/loop.c
39930@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
39931
39932 file_start_write(file);
39933 set_fs(get_ds());
39934- bw = file->f_op->write(file, buf, len, &pos);
39935+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
39936 set_fs(old_fs);
39937 file_end_write(file);
39938 if (likely(bw == len))
39939diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
39940index 02351e2..a9ea617 100644
39941--- a/drivers/block/nvme-core.c
39942+++ b/drivers/block/nvme-core.c
39943@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
39944 static struct task_struct *nvme_thread;
39945 static struct workqueue_struct *nvme_workq;
39946 static wait_queue_head_t nvme_kthread_wait;
39947-static struct notifier_block nvme_nb;
39948
39949 static void nvme_reset_failed_dev(struct work_struct *ws);
39950
39951@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
39952 .err_handler = &nvme_err_handler,
39953 };
39954
39955+static struct notifier_block nvme_nb = {
39956+ .notifier_call = &nvme_cpu_notify,
39957+};
39958+
39959 static int __init nvme_init(void)
39960 {
39961 int result;
39962@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
39963 else if (result > 0)
39964 nvme_major = result;
39965
39966- nvme_nb.notifier_call = &nvme_cpu_notify;
39967 result = register_hotcpu_notifier(&nvme_nb);
39968 if (result)
39969 goto unregister_blkdev;
39970diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
39971index 758ac44..58087fd 100644
39972--- a/drivers/block/pktcdvd.c
39973+++ b/drivers/block/pktcdvd.c
39974@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
39975
39976 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
39977 {
39978- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
39979+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
39980 }
39981
39982 /*
39983@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
39984 return -EROFS;
39985 }
39986 pd->settings.fp = ti.fp;
39987- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
39988+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
39989
39990 if (ti.nwa_v) {
39991 pd->nwa = be32_to_cpu(ti.next_writable);
39992diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
39993index e5565fb..71be10b4 100644
39994--- a/drivers/block/smart1,2.h
39995+++ b/drivers/block/smart1,2.h
39996@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
39997 }
39998
39999 static struct access_method smart4_access = {
40000- smart4_submit_command,
40001- smart4_intr_mask,
40002- smart4_fifo_full,
40003- smart4_intr_pending,
40004- smart4_completed,
40005+ .submit_command = smart4_submit_command,
40006+ .set_intr_mask = smart4_intr_mask,
40007+ .fifo_full = smart4_fifo_full,
40008+ .intr_pending = smart4_intr_pending,
40009+ .command_completed = smart4_completed,
40010 };
40011
40012 /*
40013@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
40014 }
40015
40016 static struct access_method smart2_access = {
40017- smart2_submit_command,
40018- smart2_intr_mask,
40019- smart2_fifo_full,
40020- smart2_intr_pending,
40021- smart2_completed,
40022+ .submit_command = smart2_submit_command,
40023+ .set_intr_mask = smart2_intr_mask,
40024+ .fifo_full = smart2_fifo_full,
40025+ .intr_pending = smart2_intr_pending,
40026+ .command_completed = smart2_completed,
40027 };
40028
40029 /*
40030@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
40031 }
40032
40033 static struct access_method smart2e_access = {
40034- smart2e_submit_command,
40035- smart2e_intr_mask,
40036- smart2e_fifo_full,
40037- smart2e_intr_pending,
40038- smart2e_completed,
40039+ .submit_command = smart2e_submit_command,
40040+ .set_intr_mask = smart2e_intr_mask,
40041+ .fifo_full = smart2e_fifo_full,
40042+ .intr_pending = smart2e_intr_pending,
40043+ .command_completed = smart2e_completed,
40044 };
40045
40046 /*
40047@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
40048 }
40049
40050 static struct access_method smart1_access = {
40051- smart1_submit_command,
40052- smart1_intr_mask,
40053- smart1_fifo_full,
40054- smart1_intr_pending,
40055- smart1_completed,
40056+ .submit_command = smart1_submit_command,
40057+ .set_intr_mask = smart1_intr_mask,
40058+ .fifo_full = smart1_fifo_full,
40059+ .intr_pending = smart1_intr_pending,
40060+ .command_completed = smart1_completed,
40061 };
40062diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
40063index f038dba..bb74c08 100644
40064--- a/drivers/bluetooth/btwilink.c
40065+++ b/drivers/bluetooth/btwilink.c
40066@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
40067
40068 static int bt_ti_probe(struct platform_device *pdev)
40069 {
40070- static struct ti_st *hst;
40071+ struct ti_st *hst;
40072 struct hci_dev *hdev;
40073 int err;
40074
40075diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
40076index 898b84b..86f74b9 100644
40077--- a/drivers/cdrom/cdrom.c
40078+++ b/drivers/cdrom/cdrom.c
40079@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
40080 ENSURE(reset, CDC_RESET);
40081 ENSURE(generic_packet, CDC_GENERIC_PACKET);
40082 cdi->mc_flags = 0;
40083- cdo->n_minors = 0;
40084 cdi->options = CDO_USE_FFLAGS;
40085
40086 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
40087@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
40088 else
40089 cdi->cdda_method = CDDA_OLD;
40090
40091- if (!cdo->generic_packet)
40092- cdo->generic_packet = cdrom_dummy_generic_packet;
40093+ if (!cdo->generic_packet) {
40094+ pax_open_kernel();
40095+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
40096+ pax_close_kernel();
40097+ }
40098
40099 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
40100 mutex_lock(&cdrom_mutex);
40101@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
40102 if (cdi->exit)
40103 cdi->exit(cdi);
40104
40105- cdi->ops->n_minors--;
40106 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
40107 }
40108
40109@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
40110 */
40111 nr = nframes;
40112 do {
40113- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40114+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
40115 if (cgc.buffer)
40116 break;
40117
40118@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
40119 struct cdrom_device_info *cdi;
40120 int ret;
40121
40122- ret = scnprintf(info + *pos, max_size - *pos, header);
40123+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
40124 if (!ret)
40125 return 1;
40126
40127diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
40128index 584bc31..e64a12c 100644
40129--- a/drivers/cdrom/gdrom.c
40130+++ b/drivers/cdrom/gdrom.c
40131@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
40132 .audio_ioctl = gdrom_audio_ioctl,
40133 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
40134 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
40135- .n_minors = 1,
40136 };
40137
40138 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
40139diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
40140index 6e9f74a..50c7cea 100644
40141--- a/drivers/char/Kconfig
40142+++ b/drivers/char/Kconfig
40143@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
40144
40145 config DEVKMEM
40146 bool "/dev/kmem virtual device support"
40147- default y
40148+ default n
40149+ depends on !GRKERNSEC_KMEM
40150 help
40151 Say Y here if you want to support the /dev/kmem device. The
40152 /dev/kmem device is rarely used, but can be used for certain
40153@@ -577,6 +578,7 @@ config DEVPORT
40154 bool
40155 depends on !M68K
40156 depends on ISA || PCI
40157+ depends on !GRKERNSEC_KMEM
40158 default y
40159
40160 source "drivers/s390/char/Kconfig"
40161diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
40162index a48e05b..6bac831 100644
40163--- a/drivers/char/agp/compat_ioctl.c
40164+++ b/drivers/char/agp/compat_ioctl.c
40165@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
40166 return -ENOMEM;
40167 }
40168
40169- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
40170+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
40171 sizeof(*usegment) * ureserve.seg_count)) {
40172 kfree(usegment);
40173 kfree(ksegment);
40174diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
40175index 09f17eb..8531d2f 100644
40176--- a/drivers/char/agp/frontend.c
40177+++ b/drivers/char/agp/frontend.c
40178@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40179 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
40180 return -EFAULT;
40181
40182- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
40183+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
40184 return -EFAULT;
40185
40186 client = agp_find_client_by_pid(reserve.pid);
40187@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
40188 if (segment == NULL)
40189 return -ENOMEM;
40190
40191- if (copy_from_user(segment, (void __user *) reserve.seg_list,
40192+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
40193 sizeof(struct agp_segment) * reserve.seg_count)) {
40194 kfree(segment);
40195 return -EFAULT;
40196diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
40197index 4f94375..413694e 100644
40198--- a/drivers/char/genrtc.c
40199+++ b/drivers/char/genrtc.c
40200@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
40201 switch (cmd) {
40202
40203 case RTC_PLL_GET:
40204+ memset(&pll, 0, sizeof(pll));
40205 if (get_rtc_pll(&pll))
40206 return -EINVAL;
40207 else
40208diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
40209index d5d4cd8..22d561d 100644
40210--- a/drivers/char/hpet.c
40211+++ b/drivers/char/hpet.c
40212@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
40213 }
40214
40215 static int
40216-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
40217+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
40218 struct hpet_info *info)
40219 {
40220 struct hpet_timer __iomem *timer;
40221diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
40222index 86fe45c..c0ea948 100644
40223--- a/drivers/char/hw_random/intel-rng.c
40224+++ b/drivers/char/hw_random/intel-rng.c
40225@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
40226
40227 if (no_fwh_detect)
40228 return -ENODEV;
40229- printk(warning);
40230+ printk("%s", warning);
40231 return -EBUSY;
40232 }
40233
40234diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
40235index e6db938..835e3a2 100644
40236--- a/drivers/char/ipmi/ipmi_msghandler.c
40237+++ b/drivers/char/ipmi/ipmi_msghandler.c
40238@@ -438,7 +438,7 @@ struct ipmi_smi {
40239 struct proc_dir_entry *proc_dir;
40240 char proc_dir_name[10];
40241
40242- atomic_t stats[IPMI_NUM_STATS];
40243+ atomic_unchecked_t stats[IPMI_NUM_STATS];
40244
40245 /*
40246 * run_to_completion duplicate of smb_info, smi_info
40247@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
40248 static DEFINE_MUTEX(smi_watchers_mutex);
40249
40250 #define ipmi_inc_stat(intf, stat) \
40251- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
40252+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
40253 #define ipmi_get_stat(intf, stat) \
40254- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
40255+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
40256
40257 static int is_lan_addr(struct ipmi_addr *addr)
40258 {
40259@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
40260 INIT_LIST_HEAD(&intf->cmd_rcvrs);
40261 init_waitqueue_head(&intf->waitq);
40262 for (i = 0; i < IPMI_NUM_STATS; i++)
40263- atomic_set(&intf->stats[i], 0);
40264+ atomic_set_unchecked(&intf->stats[i], 0);
40265
40266 intf->proc_dir = NULL;
40267
40268diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
40269index 5d66568..c9d93c3 100644
40270--- a/drivers/char/ipmi/ipmi_si_intf.c
40271+++ b/drivers/char/ipmi/ipmi_si_intf.c
40272@@ -285,7 +285,7 @@ struct smi_info {
40273 unsigned char slave_addr;
40274
40275 /* Counters and things for the proc filesystem. */
40276- atomic_t stats[SI_NUM_STATS];
40277+ atomic_unchecked_t stats[SI_NUM_STATS];
40278
40279 struct task_struct *thread;
40280
40281@@ -294,9 +294,9 @@ struct smi_info {
40282 };
40283
40284 #define smi_inc_stat(smi, stat) \
40285- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
40286+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
40287 #define smi_get_stat(smi, stat) \
40288- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
40289+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
40290
40291 #define SI_MAX_PARMS 4
40292
40293@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
40294 atomic_set(&new_smi->req_events, 0);
40295 new_smi->run_to_completion = false;
40296 for (i = 0; i < SI_NUM_STATS; i++)
40297- atomic_set(&new_smi->stats[i], 0);
40298+ atomic_set_unchecked(&new_smi->stats[i], 0);
40299
40300 new_smi->interrupt_disabled = true;
40301 atomic_set(&new_smi->stop_operation, 0);
40302diff --git a/drivers/char/mem.c b/drivers/char/mem.c
40303index 917403f..dddd899 100644
40304--- a/drivers/char/mem.c
40305+++ b/drivers/char/mem.c
40306@@ -18,6 +18,7 @@
40307 #include <linux/raw.h>
40308 #include <linux/tty.h>
40309 #include <linux/capability.h>
40310+#include <linux/security.h>
40311 #include <linux/ptrace.h>
40312 #include <linux/device.h>
40313 #include <linux/highmem.h>
40314@@ -36,6 +37,10 @@
40315
40316 #define DEVPORT_MINOR 4
40317
40318+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40319+extern const struct file_operations grsec_fops;
40320+#endif
40321+
40322 static inline unsigned long size_inside_page(unsigned long start,
40323 unsigned long size)
40324 {
40325@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40326
40327 while (cursor < to) {
40328 if (!devmem_is_allowed(pfn)) {
40329+#ifdef CONFIG_GRKERNSEC_KMEM
40330+ gr_handle_mem_readwrite(from, to);
40331+#else
40332 printk(KERN_INFO
40333 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
40334 current->comm, from, to);
40335+#endif
40336 return 0;
40337 }
40338 cursor += PAGE_SIZE;
40339@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40340 }
40341 return 1;
40342 }
40343+#elif defined(CONFIG_GRKERNSEC_KMEM)
40344+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40345+{
40346+ return 0;
40347+}
40348 #else
40349 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
40350 {
40351@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40352
40353 while (count > 0) {
40354 unsigned long remaining;
40355+ char *temp;
40356
40357 sz = size_inside_page(p, count);
40358
40359@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
40360 if (!ptr)
40361 return -EFAULT;
40362
40363- remaining = copy_to_user(buf, ptr, sz);
40364+#ifdef CONFIG_PAX_USERCOPY
40365+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40366+ if (!temp) {
40367+ unxlate_dev_mem_ptr(p, ptr);
40368+ return -ENOMEM;
40369+ }
40370+ memcpy(temp, ptr, sz);
40371+#else
40372+ temp = ptr;
40373+#endif
40374+
40375+ remaining = copy_to_user(buf, temp, sz);
40376+
40377+#ifdef CONFIG_PAX_USERCOPY
40378+ kfree(temp);
40379+#endif
40380+
40381 unxlate_dev_mem_ptr(p, ptr);
40382 if (remaining)
40383 return -EFAULT;
40384@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40385 size_t count, loff_t *ppos)
40386 {
40387 unsigned long p = *ppos;
40388- ssize_t low_count, read, sz;
40389+ ssize_t low_count, read, sz, err = 0;
40390 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
40391- int err = 0;
40392
40393 read = 0;
40394 if (p < (unsigned long) high_memory) {
40395@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40396 }
40397 #endif
40398 while (low_count > 0) {
40399+ char *temp;
40400+
40401 sz = size_inside_page(p, low_count);
40402
40403 /*
40404@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
40405 */
40406 kbuf = xlate_dev_kmem_ptr((char *)p);
40407
40408- if (copy_to_user(buf, kbuf, sz))
40409+#ifdef CONFIG_PAX_USERCOPY
40410+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
40411+ if (!temp)
40412+ return -ENOMEM;
40413+ memcpy(temp, kbuf, sz);
40414+#else
40415+ temp = kbuf;
40416+#endif
40417+
40418+ err = copy_to_user(buf, temp, sz);
40419+
40420+#ifdef CONFIG_PAX_USERCOPY
40421+ kfree(temp);
40422+#endif
40423+
40424+ if (err)
40425 return -EFAULT;
40426 buf += sz;
40427 p += sz;
40428@@ -827,6 +874,9 @@ static const struct memdev {
40429 #ifdef CONFIG_PRINTK
40430 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
40431 #endif
40432+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
40433+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
40434+#endif
40435 };
40436
40437 static int memory_open(struct inode *inode, struct file *filp)
40438@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
40439 continue;
40440
40441 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
40442- NULL, devlist[minor].name);
40443+ NULL, "%s", devlist[minor].name);
40444 }
40445
40446 return tty_init();
40447diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
40448index 9df78e2..01ba9ae 100644
40449--- a/drivers/char/nvram.c
40450+++ b/drivers/char/nvram.c
40451@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
40452
40453 spin_unlock_irq(&rtc_lock);
40454
40455- if (copy_to_user(buf, contents, tmp - contents))
40456+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
40457 return -EFAULT;
40458
40459 *ppos = i;
40460diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
40461index 0ea9986..e7b07e4 100644
40462--- a/drivers/char/pcmcia/synclink_cs.c
40463+++ b/drivers/char/pcmcia/synclink_cs.c
40464@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40465
40466 if (debug_level >= DEBUG_LEVEL_INFO)
40467 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
40468- __FILE__, __LINE__, info->device_name, port->count);
40469+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
40470
40471 if (tty_port_close_start(port, tty, filp) == 0)
40472 goto cleanup;
40473@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
40474 cleanup:
40475 if (debug_level >= DEBUG_LEVEL_INFO)
40476 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
40477- tty->driver->name, port->count);
40478+ tty->driver->name, atomic_read(&port->count));
40479 }
40480
40481 /* Wait until the transmitter is empty.
40482@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40483
40484 if (debug_level >= DEBUG_LEVEL_INFO)
40485 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
40486- __FILE__, __LINE__, tty->driver->name, port->count);
40487+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
40488
40489 /* If port is closing, signal caller to try again */
40490 if (port->flags & ASYNC_CLOSING){
40491@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
40492 goto cleanup;
40493 }
40494 spin_lock(&port->lock);
40495- port->count++;
40496+ atomic_inc(&port->count);
40497 spin_unlock(&port->lock);
40498 spin_unlock_irqrestore(&info->netlock, flags);
40499
40500- if (port->count == 1) {
40501+ if (atomic_read(&port->count) == 1) {
40502 /* 1st open on this device, init hardware */
40503 retval = startup(info, tty);
40504 if (retval < 0)
40505@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
40506 unsigned short new_crctype;
40507
40508 /* return error if TTY interface open */
40509- if (info->port.count)
40510+ if (atomic_read(&info->port.count))
40511 return -EBUSY;
40512
40513 switch (encoding)
40514@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
40515
40516 /* arbitrate between network and tty opens */
40517 spin_lock_irqsave(&info->netlock, flags);
40518- if (info->port.count != 0 || info->netcount != 0) {
40519+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
40520 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
40521 spin_unlock_irqrestore(&info->netlock, flags);
40522 return -EBUSY;
40523@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40524 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
40525
40526 /* return error if TTY interface open */
40527- if (info->port.count)
40528+ if (atomic_read(&info->port.count))
40529 return -EBUSY;
40530
40531 if (cmd != SIOCWANDEV)
40532diff --git a/drivers/char/random.c b/drivers/char/random.c
40533index c18d41d..7c499f3 100644
40534--- a/drivers/char/random.c
40535+++ b/drivers/char/random.c
40536@@ -289,9 +289,6 @@
40537 /*
40538 * To allow fractional bits to be tracked, the entropy_count field is
40539 * denominated in units of 1/8th bits.
40540- *
40541- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
40542- * credit_entropy_bits() needs to be 64 bits wide.
40543 */
40544 #define ENTROPY_SHIFT 3
40545 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
40546@@ -439,9 +436,9 @@ struct entropy_store {
40547 };
40548
40549 static void push_to_pool(struct work_struct *work);
40550-static __u32 input_pool_data[INPUT_POOL_WORDS];
40551-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
40552-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
40553+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
40554+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40555+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
40556
40557 static struct entropy_store input_pool = {
40558 .poolinfo = &poolinfo_table[0],
40559@@ -635,7 +632,7 @@ retry:
40560 /* The +2 corresponds to the /4 in the denominator */
40561
40562 do {
40563- unsigned int anfrac = min(pnfrac, pool_size/2);
40564+ u64 anfrac = min(pnfrac, pool_size/2);
40565 unsigned int add =
40566 ((pool_size - entropy_count)*anfrac*3) >> s;
40567
40568@@ -1106,7 +1103,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
40569 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
40570 spin_unlock_irqrestore(&r->lock, flags);
40571
40572- memset(workspace, 0, sizeof(workspace));
40573+ memzero_explicit(workspace, sizeof(workspace));
40574
40575 /*
40576 * In case the hash function has some recognizable output
40577@@ -1118,7 +1115,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
40578 hash.w[2] ^= rol32(hash.w[2], 16);
40579
40580 memcpy(out, &hash, EXTRACT_SIZE);
40581- memset(&hash, 0, sizeof(hash));
40582+ memzero_explicit(&hash, sizeof(hash));
40583 }
40584
40585 /*
40586@@ -1175,7 +1172,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
40587 }
40588
40589 /* Wipe data just returned from memory */
40590- memset(tmp, 0, sizeof(tmp));
40591+ memzero_explicit(tmp, sizeof(tmp));
40592
40593 return ret;
40594 }
40595@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40596
40597 extract_buf(r, tmp);
40598 i = min_t(int, nbytes, EXTRACT_SIZE);
40599- if (copy_to_user(buf, tmp, i)) {
40600+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
40601 ret = -EFAULT;
40602 break;
40603 }
40604@@ -1218,7 +1215,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
40605 }
40606
40607 /* Wipe data just returned from memory */
40608- memset(tmp, 0, sizeof(tmp));
40609+ memzero_explicit(tmp, sizeof(tmp));
40610
40611 return ret;
40612 }
40613@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
40614 static int proc_do_uuid(struct ctl_table *table, int write,
40615 void __user *buffer, size_t *lenp, loff_t *ppos)
40616 {
40617- struct ctl_table fake_table;
40618+ ctl_table_no_const fake_table;
40619 unsigned char buf[64], tmp_uuid[16], *uuid;
40620
40621 uuid = table->data;
40622@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
40623 static int proc_do_entropy(struct ctl_table *table, int write,
40624 void __user *buffer, size_t *lenp, loff_t *ppos)
40625 {
40626- struct ctl_table fake_table;
40627+ ctl_table_no_const fake_table;
40628 int entropy_count;
40629
40630 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
40631diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
40632index 7cc1fe22..b602d6b 100644
40633--- a/drivers/char/sonypi.c
40634+++ b/drivers/char/sonypi.c
40635@@ -54,6 +54,7 @@
40636
40637 #include <asm/uaccess.h>
40638 #include <asm/io.h>
40639+#include <asm/local.h>
40640
40641 #include <linux/sonypi.h>
40642
40643@@ -490,7 +491,7 @@ static struct sonypi_device {
40644 spinlock_t fifo_lock;
40645 wait_queue_head_t fifo_proc_list;
40646 struct fasync_struct *fifo_async;
40647- int open_count;
40648+ local_t open_count;
40649 int model;
40650 struct input_dev *input_jog_dev;
40651 struct input_dev *input_key_dev;
40652@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
40653 static int sonypi_misc_release(struct inode *inode, struct file *file)
40654 {
40655 mutex_lock(&sonypi_device.lock);
40656- sonypi_device.open_count--;
40657+ local_dec(&sonypi_device.open_count);
40658 mutex_unlock(&sonypi_device.lock);
40659 return 0;
40660 }
40661@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
40662 {
40663 mutex_lock(&sonypi_device.lock);
40664 /* Flush input queue on first open */
40665- if (!sonypi_device.open_count)
40666+ if (!local_read(&sonypi_device.open_count))
40667 kfifo_reset(&sonypi_device.fifo);
40668- sonypi_device.open_count++;
40669+ local_inc(&sonypi_device.open_count);
40670 mutex_unlock(&sonypi_device.lock);
40671
40672 return 0;
40673diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
40674index 565a947..dcdc06e 100644
40675--- a/drivers/char/tpm/tpm_acpi.c
40676+++ b/drivers/char/tpm/tpm_acpi.c
40677@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
40678 virt = acpi_os_map_iomem(start, len);
40679 if (!virt) {
40680 kfree(log->bios_event_log);
40681+ log->bios_event_log = NULL;
40682 printk("%s: ERROR - Unable to map memory\n", __func__);
40683 return -EIO;
40684 }
40685
40686- memcpy_fromio(log->bios_event_log, virt, len);
40687+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
40688
40689 acpi_os_unmap_iomem(virt, len);
40690 return 0;
40691diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
40692index 3a56a13..f8cbd25 100644
40693--- a/drivers/char/tpm/tpm_eventlog.c
40694+++ b/drivers/char/tpm/tpm_eventlog.c
40695@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
40696 event = addr;
40697
40698 if ((event->event_type == 0 && event->event_size == 0) ||
40699- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
40700+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
40701 return NULL;
40702
40703 return addr;
40704@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
40705 return NULL;
40706
40707 if ((event->event_type == 0 && event->event_size == 0) ||
40708- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
40709+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
40710 return NULL;
40711
40712 (*pos)++;
40713@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
40714 int i;
40715
40716 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
40717- seq_putc(m, data[i]);
40718+ if (!seq_putc(m, data[i]))
40719+ return -EFAULT;
40720
40721 return 0;
40722 }
40723diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
40724index b585b47..488f43e 100644
40725--- a/drivers/char/virtio_console.c
40726+++ b/drivers/char/virtio_console.c
40727@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
40728 if (to_user) {
40729 ssize_t ret;
40730
40731- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
40732+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
40733 if (ret)
40734 return -EFAULT;
40735 } else {
40736@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
40737 if (!port_has_data(port) && !port->host_connected)
40738 return 0;
40739
40740- return fill_readbuf(port, ubuf, count, true);
40741+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
40742 }
40743
40744 static int wait_port_writable(struct port *port, bool nonblock)
40745diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
40746index b9355da..9611f4e 100644
40747--- a/drivers/clk/clk-composite.c
40748+++ b/drivers/clk/clk-composite.c
40749@@ -191,7 +191,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
40750 struct clk *clk;
40751 struct clk_init_data init;
40752 struct clk_composite *composite;
40753- struct clk_ops *clk_composite_ops;
40754+ clk_ops_no_const *clk_composite_ops;
40755
40756 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
40757 if (!composite) {
40758diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
40759index dd3a78c..386d49c 100644
40760--- a/drivers/clk/socfpga/clk-gate.c
40761+++ b/drivers/clk/socfpga/clk-gate.c
40762@@ -22,6 +22,7 @@
40763 #include <linux/mfd/syscon.h>
40764 #include <linux/of.h>
40765 #include <linux/regmap.h>
40766+#include <asm/pgtable.h>
40767
40768 #include "clk.h"
40769
40770@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40771 return 0;
40772 }
40773
40774-static struct clk_ops gateclk_ops = {
40775+static clk_ops_no_const gateclk_ops __read_only = {
40776 .prepare = socfpga_clk_prepare,
40777 .recalc_rate = socfpga_clk_recalc_rate,
40778 .get_parent = socfpga_clk_get_parent,
40779@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40780 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40781 socfpga_clk->hw.bit_idx = clk_gate[1];
40782
40783- gateclk_ops.enable = clk_gate_ops.enable;
40784- gateclk_ops.disable = clk_gate_ops.disable;
40785+ pax_open_kernel();
40786+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40787+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40788+ pax_close_kernel();
40789 }
40790
40791 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40792diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40793index de6da95..c98278b 100644
40794--- a/drivers/clk/socfpga/clk-pll.c
40795+++ b/drivers/clk/socfpga/clk-pll.c
40796@@ -21,6 +21,7 @@
40797 #include <linux/io.h>
40798 #include <linux/of.h>
40799 #include <linux/of_address.h>
40800+#include <asm/pgtable.h>
40801
40802 #include "clk.h"
40803
40804@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40805 CLK_MGR_PLL_CLK_SRC_MASK;
40806 }
40807
40808-static struct clk_ops clk_pll_ops = {
40809+static clk_ops_no_const clk_pll_ops __read_only = {
40810 .recalc_rate = clk_pll_recalc_rate,
40811 .get_parent = clk_pll_get_parent,
40812 };
40813@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40814 pll_clk->hw.hw.init = &init;
40815
40816 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40817- clk_pll_ops.enable = clk_gate_ops.enable;
40818- clk_pll_ops.disable = clk_gate_ops.disable;
40819+ pax_open_kernel();
40820+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40821+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40822+ pax_close_kernel();
40823
40824 clk = clk_register(NULL, &pll_clk->hw.hw);
40825 if (WARN_ON(IS_ERR(clk))) {
40826diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40827index b0c18ed..1713a80 100644
40828--- a/drivers/cpufreq/acpi-cpufreq.c
40829+++ b/drivers/cpufreq/acpi-cpufreq.c
40830@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40831 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40832 per_cpu(acfreq_data, cpu) = data;
40833
40834- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40835- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40836+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40837+ pax_open_kernel();
40838+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40839+ pax_close_kernel();
40840+ }
40841
40842 result = acpi_processor_register_performance(data->acpi_data, cpu);
40843 if (result)
40844@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40845 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40846 break;
40847 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40848- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40849+ pax_open_kernel();
40850+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40851+ pax_close_kernel();
40852 break;
40853 default:
40854 break;
40855@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40856 if (!msrs)
40857 return;
40858
40859- acpi_cpufreq_driver.boost_supported = true;
40860- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40861+ pax_open_kernel();
40862+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40863+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40864+ pax_close_kernel();
40865
40866 cpu_notifier_register_begin();
40867
40868diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40869index 61190f6..fcd899a 100644
40870--- a/drivers/cpufreq/cpufreq.c
40871+++ b/drivers/cpufreq/cpufreq.c
40872@@ -2095,7 +2095,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40873 }
40874
40875 mutex_lock(&cpufreq_governor_mutex);
40876- list_del(&governor->governor_list);
40877+ pax_list_del(&governor->governor_list);
40878 mutex_unlock(&cpufreq_governor_mutex);
40879 return;
40880 }
40881@@ -2311,7 +2311,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40882 return NOTIFY_OK;
40883 }
40884
40885-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40886+static struct notifier_block cpufreq_cpu_notifier = {
40887 .notifier_call = cpufreq_cpu_callback,
40888 };
40889
40890@@ -2351,13 +2351,17 @@ int cpufreq_boost_trigger_state(int state)
40891 return 0;
40892
40893 write_lock_irqsave(&cpufreq_driver_lock, flags);
40894- cpufreq_driver->boost_enabled = state;
40895+ pax_open_kernel();
40896+ *(bool *)&cpufreq_driver->boost_enabled = state;
40897+ pax_close_kernel();
40898 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40899
40900 ret = cpufreq_driver->set_boost(state);
40901 if (ret) {
40902 write_lock_irqsave(&cpufreq_driver_lock, flags);
40903- cpufreq_driver->boost_enabled = !state;
40904+ pax_open_kernel();
40905+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40906+ pax_close_kernel();
40907 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40908
40909 pr_err("%s: Cannot %s BOOST\n",
40910@@ -2414,8 +2418,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40911
40912 pr_debug("trying to register driver %s\n", driver_data->name);
40913
40914- if (driver_data->setpolicy)
40915- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40916+ if (driver_data->setpolicy) {
40917+ pax_open_kernel();
40918+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40919+ pax_close_kernel();
40920+ }
40921
40922 write_lock_irqsave(&cpufreq_driver_lock, flags);
40923 if (cpufreq_driver) {
40924@@ -2430,8 +2437,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40925 * Check if driver provides function to enable boost -
40926 * if not, use cpufreq_boost_set_sw as default
40927 */
40928- if (!cpufreq_driver->set_boost)
40929- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40930+ if (!cpufreq_driver->set_boost) {
40931+ pax_open_kernel();
40932+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40933+ pax_close_kernel();
40934+ }
40935
40936 ret = cpufreq_sysfs_create_file(&boost.attr);
40937 if (ret) {
40938diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40939index 1b44496..b80ff5e 100644
40940--- a/drivers/cpufreq/cpufreq_governor.c
40941+++ b/drivers/cpufreq/cpufreq_governor.c
40942@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40943 struct dbs_data *dbs_data;
40944 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40945 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
40946- struct od_ops *od_ops = NULL;
40947+ const struct od_ops *od_ops = NULL;
40948 struct od_dbs_tuners *od_tuners = NULL;
40949 struct cs_dbs_tuners *cs_tuners = NULL;
40950 struct cpu_dbs_common_info *cpu_cdbs;
40951@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40952
40953 if ((cdata->governor == GOV_CONSERVATIVE) &&
40954 (!policy->governor->initialized)) {
40955- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40956+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40957
40958 cpufreq_register_notifier(cs_ops->notifier_block,
40959 CPUFREQ_TRANSITION_NOTIFIER);
40960@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40961
40962 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
40963 (policy->governor->initialized == 1)) {
40964- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40965+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40966
40967 cpufreq_unregister_notifier(cs_ops->notifier_block,
40968 CPUFREQ_TRANSITION_NOTIFIER);
40969diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
40970index cc401d1..8197340 100644
40971--- a/drivers/cpufreq/cpufreq_governor.h
40972+++ b/drivers/cpufreq/cpufreq_governor.h
40973@@ -212,7 +212,7 @@ struct common_dbs_data {
40974 void (*exit)(struct dbs_data *dbs_data);
40975
40976 /* Governor specific ops, see below */
40977- void *gov_ops;
40978+ const void *gov_ops;
40979 };
40980
40981 /* Governor Per policy data */
40982@@ -232,7 +232,7 @@ struct od_ops {
40983 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
40984 unsigned int freq_next, unsigned int relation);
40985 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
40986-};
40987+} __no_const;
40988
40989 struct cs_ops {
40990 struct notifier_block *notifier_block;
40991diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
40992index ad3f38f..8f086cd 100644
40993--- a/drivers/cpufreq/cpufreq_ondemand.c
40994+++ b/drivers/cpufreq/cpufreq_ondemand.c
40995@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
40996
40997 define_get_cpu_dbs_routines(od_cpu_dbs_info);
40998
40999-static struct od_ops od_ops = {
41000+static struct od_ops od_ops __read_only = {
41001 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
41002 .powersave_bias_target = generic_powersave_bias_target,
41003 .freq_increase = dbs_freq_increase,
41004@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
41005 (struct cpufreq_policy *, unsigned int, unsigned int),
41006 unsigned int powersave_bias)
41007 {
41008- od_ops.powersave_bias_target = f;
41009+ pax_open_kernel();
41010+ *(void **)&od_ops.powersave_bias_target = f;
41011+ pax_close_kernel();
41012 od_set_powersave_bias(powersave_bias);
41013 }
41014 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
41015
41016 void od_unregister_powersave_bias_handler(void)
41017 {
41018- od_ops.powersave_bias_target = generic_powersave_bias_target;
41019+ pax_open_kernel();
41020+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
41021+ pax_close_kernel();
41022 od_set_powersave_bias(0);
41023 }
41024 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
41025diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
41026index 0668b38..2f3ea18 100644
41027--- a/drivers/cpufreq/intel_pstate.c
41028+++ b/drivers/cpufreq/intel_pstate.c
41029@@ -120,10 +120,10 @@ struct pstate_funcs {
41030 struct cpu_defaults {
41031 struct pstate_adjust_policy pid_policy;
41032 struct pstate_funcs funcs;
41033-};
41034+} __do_const;
41035
41036 static struct pstate_adjust_policy pid_params;
41037-static struct pstate_funcs pstate_funcs;
41038+static struct pstate_funcs *pstate_funcs;
41039
41040 struct perf_limits {
41041 int no_turbo;
41042@@ -527,17 +527,17 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
41043
41044 cpu->pstate.current_pstate = pstate;
41045
41046- pstate_funcs.set(cpu, pstate);
41047+ pstate_funcs->set(cpu, pstate);
41048 }
41049
41050 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
41051 {
41052- cpu->pstate.min_pstate = pstate_funcs.get_min();
41053- cpu->pstate.max_pstate = pstate_funcs.get_max();
41054- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
41055+ cpu->pstate.min_pstate = pstate_funcs->get_min();
41056+ cpu->pstate.max_pstate = pstate_funcs->get_max();
41057+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
41058
41059- if (pstate_funcs.get_vid)
41060- pstate_funcs.get_vid(cpu);
41061+ if (pstate_funcs->get_vid)
41062+ pstate_funcs->get_vid(cpu);
41063 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
41064 }
41065
41066@@ -810,9 +810,9 @@ static int intel_pstate_msrs_not_valid(void)
41067 rdmsrl(MSR_IA32_APERF, aperf);
41068 rdmsrl(MSR_IA32_MPERF, mperf);
41069
41070- if (!pstate_funcs.get_max() ||
41071- !pstate_funcs.get_min() ||
41072- !pstate_funcs.get_turbo())
41073+ if (!pstate_funcs->get_max() ||
41074+ !pstate_funcs->get_min() ||
41075+ !pstate_funcs->get_turbo())
41076 return -ENODEV;
41077
41078 rdmsrl(MSR_IA32_APERF, tmp);
41079@@ -826,7 +826,7 @@ static int intel_pstate_msrs_not_valid(void)
41080 return 0;
41081 }
41082
41083-static void copy_pid_params(struct pstate_adjust_policy *policy)
41084+static void copy_pid_params(const struct pstate_adjust_policy *policy)
41085 {
41086 pid_params.sample_rate_ms = policy->sample_rate_ms;
41087 pid_params.p_gain_pct = policy->p_gain_pct;
41088@@ -838,11 +838,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
41089
41090 static void copy_cpu_funcs(struct pstate_funcs *funcs)
41091 {
41092- pstate_funcs.get_max = funcs->get_max;
41093- pstate_funcs.get_min = funcs->get_min;
41094- pstate_funcs.get_turbo = funcs->get_turbo;
41095- pstate_funcs.set = funcs->set;
41096- pstate_funcs.get_vid = funcs->get_vid;
41097+ pstate_funcs = funcs;
41098 }
41099
41100 #if IS_ENABLED(CONFIG_ACPI)
41101diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
41102index 529cfd9..0e28fff 100644
41103--- a/drivers/cpufreq/p4-clockmod.c
41104+++ b/drivers/cpufreq/p4-clockmod.c
41105@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41106 case 0x0F: /* Core Duo */
41107 case 0x16: /* Celeron Core */
41108 case 0x1C: /* Atom */
41109- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41110+ pax_open_kernel();
41111+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41112+ pax_close_kernel();
41113 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
41114 case 0x0D: /* Pentium M (Dothan) */
41115- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41116+ pax_open_kernel();
41117+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41118+ pax_close_kernel();
41119 /* fall through */
41120 case 0x09: /* Pentium M (Banias) */
41121 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
41122@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
41123
41124 /* on P-4s, the TSC runs with constant frequency independent whether
41125 * throttling is active or not. */
41126- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41127+ pax_open_kernel();
41128+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
41129+ pax_close_kernel();
41130
41131 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
41132 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
41133diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
41134index 9bb42ba..b01b4a2 100644
41135--- a/drivers/cpufreq/sparc-us3-cpufreq.c
41136+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
41137@@ -18,14 +18,12 @@
41138 #include <asm/head.h>
41139 #include <asm/timer.h>
41140
41141-static struct cpufreq_driver *cpufreq_us3_driver;
41142-
41143 struct us3_freq_percpu_info {
41144 struct cpufreq_frequency_table table[4];
41145 };
41146
41147 /* Indexed by cpu number. */
41148-static struct us3_freq_percpu_info *us3_freq_table;
41149+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
41150
41151 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
41152 * in the Safari config register.
41153@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
41154
41155 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
41156 {
41157- if (cpufreq_us3_driver)
41158- us3_freq_target(policy, 0);
41159+ us3_freq_target(policy, 0);
41160
41161 return 0;
41162 }
41163
41164+static int __init us3_freq_init(void);
41165+static void __exit us3_freq_exit(void);
41166+
41167+static struct cpufreq_driver cpufreq_us3_driver = {
41168+ .init = us3_freq_cpu_init,
41169+ .verify = cpufreq_generic_frequency_table_verify,
41170+ .target_index = us3_freq_target,
41171+ .get = us3_freq_get,
41172+ .exit = us3_freq_cpu_exit,
41173+ .name = "UltraSPARC-III",
41174+
41175+};
41176+
41177 static int __init us3_freq_init(void)
41178 {
41179 unsigned long manuf, impl, ver;
41180- int ret;
41181
41182 if (tlb_type != cheetah && tlb_type != cheetah_plus)
41183 return -ENODEV;
41184@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
41185 (impl == CHEETAH_IMPL ||
41186 impl == CHEETAH_PLUS_IMPL ||
41187 impl == JAGUAR_IMPL ||
41188- impl == PANTHER_IMPL)) {
41189- struct cpufreq_driver *driver;
41190-
41191- ret = -ENOMEM;
41192- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
41193- if (!driver)
41194- goto err_out;
41195-
41196- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
41197- GFP_KERNEL);
41198- if (!us3_freq_table)
41199- goto err_out;
41200-
41201- driver->init = us3_freq_cpu_init;
41202- driver->verify = cpufreq_generic_frequency_table_verify;
41203- driver->target_index = us3_freq_target;
41204- driver->get = us3_freq_get;
41205- driver->exit = us3_freq_cpu_exit;
41206- strcpy(driver->name, "UltraSPARC-III");
41207-
41208- cpufreq_us3_driver = driver;
41209- ret = cpufreq_register_driver(driver);
41210- if (ret)
41211- goto err_out;
41212-
41213- return 0;
41214-
41215-err_out:
41216- if (driver) {
41217- kfree(driver);
41218- cpufreq_us3_driver = NULL;
41219- }
41220- kfree(us3_freq_table);
41221- us3_freq_table = NULL;
41222- return ret;
41223- }
41224+ impl == PANTHER_IMPL))
41225+ return cpufreq_register_driver(&cpufreq_us3_driver);
41226
41227 return -ENODEV;
41228 }
41229
41230 static void __exit us3_freq_exit(void)
41231 {
41232- if (cpufreq_us3_driver) {
41233- cpufreq_unregister_driver(cpufreq_us3_driver);
41234- kfree(cpufreq_us3_driver);
41235- cpufreq_us3_driver = NULL;
41236- kfree(us3_freq_table);
41237- us3_freq_table = NULL;
41238- }
41239+ cpufreq_unregister_driver(&cpufreq_us3_driver);
41240 }
41241
41242 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
41243diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
41244index 7d4a315..21bb886 100644
41245--- a/drivers/cpufreq/speedstep-centrino.c
41246+++ b/drivers/cpufreq/speedstep-centrino.c
41247@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
41248 !cpu_has(cpu, X86_FEATURE_EST))
41249 return -ENODEV;
41250
41251- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
41252- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41253+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
41254+ pax_open_kernel();
41255+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
41256+ pax_close_kernel();
41257+ }
41258
41259 if (policy->cpu != 0)
41260 return -ENODEV;
41261diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
41262index e431d11..d0b997e 100644
41263--- a/drivers/cpuidle/driver.c
41264+++ b/drivers/cpuidle/driver.c
41265@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
41266
41267 static void poll_idle_init(struct cpuidle_driver *drv)
41268 {
41269- struct cpuidle_state *state = &drv->states[0];
41270+ cpuidle_state_no_const *state = &drv->states[0];
41271
41272 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
41273 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
41274diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
41275index ca89412..a7b9c49 100644
41276--- a/drivers/cpuidle/governor.c
41277+++ b/drivers/cpuidle/governor.c
41278@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
41279 mutex_lock(&cpuidle_lock);
41280 if (__cpuidle_find_governor(gov->name) == NULL) {
41281 ret = 0;
41282- list_add_tail(&gov->governor_list, &cpuidle_governors);
41283+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
41284 if (!cpuidle_curr_governor ||
41285 cpuidle_curr_governor->rating < gov->rating)
41286 cpuidle_switch_governor(gov);
41287diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
41288index 97c5903..023ad23 100644
41289--- a/drivers/cpuidle/sysfs.c
41290+++ b/drivers/cpuidle/sysfs.c
41291@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
41292 NULL
41293 };
41294
41295-static struct attribute_group cpuidle_attr_group = {
41296+static attribute_group_no_const cpuidle_attr_group = {
41297 .attrs = cpuidle_default_attrs,
41298 .name = "cpuidle",
41299 };
41300diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
41301index 8d2a772..33826c9 100644
41302--- a/drivers/crypto/hifn_795x.c
41303+++ b/drivers/crypto/hifn_795x.c
41304@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
41305 MODULE_PARM_DESC(hifn_pll_ref,
41306 "PLL reference clock (pci[freq] or ext[freq], default ext)");
41307
41308-static atomic_t hifn_dev_number;
41309+static atomic_unchecked_t hifn_dev_number;
41310
41311 #define ACRYPTO_OP_DECRYPT 0
41312 #define ACRYPTO_OP_ENCRYPT 1
41313@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
41314 goto err_out_disable_pci_device;
41315
41316 snprintf(name, sizeof(name), "hifn%d",
41317- atomic_inc_return(&hifn_dev_number)-1);
41318+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
41319
41320 err = pci_request_regions(pdev, name);
41321 if (err)
41322diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
41323index 9f90369..bfcacdb 100644
41324--- a/drivers/devfreq/devfreq.c
41325+++ b/drivers/devfreq/devfreq.c
41326@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
41327 goto err_out;
41328 }
41329
41330- list_add(&governor->node, &devfreq_governor_list);
41331+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
41332
41333 list_for_each_entry(devfreq, &devfreq_list, node) {
41334 int ret = 0;
41335@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
41336 }
41337 }
41338
41339- list_del(&governor->node);
41340+ pax_list_del((struct list_head *)&governor->node);
41341 err_out:
41342 mutex_unlock(&devfreq_list_lock);
41343
41344diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
41345index 42d4974..2714f36 100644
41346--- a/drivers/dma/sh/shdma-base.c
41347+++ b/drivers/dma/sh/shdma-base.c
41348@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
41349 schan->slave_id = -EINVAL;
41350 }
41351
41352- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
41353- sdev->desc_size, GFP_KERNEL);
41354+ schan->desc = kcalloc(sdev->desc_size,
41355+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
41356 if (!schan->desc) {
41357 ret = -ENOMEM;
41358 goto edescalloc;
41359diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
41360index 58eb857..d7e42c8 100644
41361--- a/drivers/dma/sh/shdmac.c
41362+++ b/drivers/dma/sh/shdmac.c
41363@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
41364 return ret;
41365 }
41366
41367-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
41368+static struct notifier_block sh_dmae_nmi_notifier = {
41369 .notifier_call = sh_dmae_nmi_handler,
41370
41371 /* Run before NMI debug handler and KGDB */
41372diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
41373index 592af5f..bb1d583 100644
41374--- a/drivers/edac/edac_device.c
41375+++ b/drivers/edac/edac_device.c
41376@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
41377 */
41378 int edac_device_alloc_index(void)
41379 {
41380- static atomic_t device_indexes = ATOMIC_INIT(0);
41381+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
41382
41383- return atomic_inc_return(&device_indexes) - 1;
41384+ return atomic_inc_return_unchecked(&device_indexes) - 1;
41385 }
41386 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
41387
41388diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
41389index a6cd361..7bdbf53 100644
41390--- a/drivers/edac/edac_mc_sysfs.c
41391+++ b/drivers/edac/edac_mc_sysfs.c
41392@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
41393 struct dev_ch_attribute {
41394 struct device_attribute attr;
41395 int channel;
41396-};
41397+} __do_const;
41398
41399 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
41400 struct dev_ch_attribute dev_attr_legacy_##_name = \
41401@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
41402 }
41403
41404 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
41405+ pax_open_kernel();
41406 if (mci->get_sdram_scrub_rate) {
41407- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41408- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41409+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
41410+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
41411 }
41412 if (mci->set_sdram_scrub_rate) {
41413- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41414- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41415+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
41416+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
41417 }
41418+ pax_close_kernel();
41419 err = device_create_file(&mci->dev,
41420 &dev_attr_sdram_scrub_rate);
41421 if (err) {
41422diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
41423index 2cf44b4d..6dd2dc7 100644
41424--- a/drivers/edac/edac_pci.c
41425+++ b/drivers/edac/edac_pci.c
41426@@ -29,7 +29,7 @@
41427
41428 static DEFINE_MUTEX(edac_pci_ctls_mutex);
41429 static LIST_HEAD(edac_pci_list);
41430-static atomic_t pci_indexes = ATOMIC_INIT(0);
41431+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
41432
41433 /*
41434 * edac_pci_alloc_ctl_info
41435@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
41436 */
41437 int edac_pci_alloc_index(void)
41438 {
41439- return atomic_inc_return(&pci_indexes) - 1;
41440+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
41441 }
41442 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
41443
41444diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
41445index e8658e4..22746d6 100644
41446--- a/drivers/edac/edac_pci_sysfs.c
41447+++ b/drivers/edac/edac_pci_sysfs.c
41448@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
41449 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
41450 static int edac_pci_poll_msec = 1000; /* one second workq period */
41451
41452-static atomic_t pci_parity_count = ATOMIC_INIT(0);
41453-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
41454+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
41455+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
41456
41457 static struct kobject *edac_pci_top_main_kobj;
41458 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
41459@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
41460 void *value;
41461 ssize_t(*show) (void *, char *);
41462 ssize_t(*store) (void *, const char *, size_t);
41463-};
41464+} __do_const;
41465
41466 /* Set of show/store abstract level functions for PCI Parity object */
41467 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
41468@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41469 edac_printk(KERN_CRIT, EDAC_PCI,
41470 "Signaled System Error on %s\n",
41471 pci_name(dev));
41472- atomic_inc(&pci_nonparity_count);
41473+ atomic_inc_unchecked(&pci_nonparity_count);
41474 }
41475
41476 if (status & (PCI_STATUS_PARITY)) {
41477@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41478 "Master Data Parity Error on %s\n",
41479 pci_name(dev));
41480
41481- atomic_inc(&pci_parity_count);
41482+ atomic_inc_unchecked(&pci_parity_count);
41483 }
41484
41485 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41486@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41487 "Detected Parity Error on %s\n",
41488 pci_name(dev));
41489
41490- atomic_inc(&pci_parity_count);
41491+ atomic_inc_unchecked(&pci_parity_count);
41492 }
41493 }
41494
41495@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41496 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
41497 "Signaled System Error on %s\n",
41498 pci_name(dev));
41499- atomic_inc(&pci_nonparity_count);
41500+ atomic_inc_unchecked(&pci_nonparity_count);
41501 }
41502
41503 if (status & (PCI_STATUS_PARITY)) {
41504@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41505 "Master Data Parity Error on "
41506 "%s\n", pci_name(dev));
41507
41508- atomic_inc(&pci_parity_count);
41509+ atomic_inc_unchecked(&pci_parity_count);
41510 }
41511
41512 if (status & (PCI_STATUS_DETECTED_PARITY)) {
41513@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
41514 "Detected Parity Error on %s\n",
41515 pci_name(dev));
41516
41517- atomic_inc(&pci_parity_count);
41518+ atomic_inc_unchecked(&pci_parity_count);
41519 }
41520 }
41521 }
41522@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
41523 if (!check_pci_errors)
41524 return;
41525
41526- before_count = atomic_read(&pci_parity_count);
41527+ before_count = atomic_read_unchecked(&pci_parity_count);
41528
41529 /* scan all PCI devices looking for a Parity Error on devices and
41530 * bridges.
41531@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
41532 /* Only if operator has selected panic on PCI Error */
41533 if (edac_pci_get_panic_on_pe()) {
41534 /* If the count is different 'after' from 'before' */
41535- if (before_count != atomic_read(&pci_parity_count))
41536+ if (before_count != atomic_read_unchecked(&pci_parity_count))
41537 panic("EDAC: PCI Parity Error");
41538 }
41539 }
41540diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
41541index 51b7e3a..aa8a3e8 100644
41542--- a/drivers/edac/mce_amd.h
41543+++ b/drivers/edac/mce_amd.h
41544@@ -77,7 +77,7 @@ struct amd_decoder_ops {
41545 bool (*mc0_mce)(u16, u8);
41546 bool (*mc1_mce)(u16, u8);
41547 bool (*mc2_mce)(u16, u8);
41548-};
41549+} __no_const;
41550
41551 void amd_report_gart_errors(bool);
41552 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
41553diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
41554index 57ea7f4..af06b76 100644
41555--- a/drivers/firewire/core-card.c
41556+++ b/drivers/firewire/core-card.c
41557@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
41558 const struct fw_card_driver *driver,
41559 struct device *device)
41560 {
41561- static atomic_t index = ATOMIC_INIT(-1);
41562+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
41563
41564- card->index = atomic_inc_return(&index);
41565+ card->index = atomic_inc_return_unchecked(&index);
41566 card->driver = driver;
41567 card->device = device;
41568 card->current_tlabel = 0;
41569@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
41570
41571 void fw_core_remove_card(struct fw_card *card)
41572 {
41573- struct fw_card_driver dummy_driver = dummy_driver_template;
41574+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
41575
41576 card->driver->update_phy_reg(card, 4,
41577 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
41578diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
41579index 2c6d5e1..a2cca6b 100644
41580--- a/drivers/firewire/core-device.c
41581+++ b/drivers/firewire/core-device.c
41582@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
41583 struct config_rom_attribute {
41584 struct device_attribute attr;
41585 u32 key;
41586-};
41587+} __do_const;
41588
41589 static ssize_t show_immediate(struct device *dev,
41590 struct device_attribute *dattr, char *buf)
41591diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
41592index eb6935c..3cc2bfa 100644
41593--- a/drivers/firewire/core-transaction.c
41594+++ b/drivers/firewire/core-transaction.c
41595@@ -38,6 +38,7 @@
41596 #include <linux/timer.h>
41597 #include <linux/types.h>
41598 #include <linux/workqueue.h>
41599+#include <linux/sched.h>
41600
41601 #include <asm/byteorder.h>
41602
41603diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
41604index e1480ff6..1a429bd 100644
41605--- a/drivers/firewire/core.h
41606+++ b/drivers/firewire/core.h
41607@@ -111,6 +111,7 @@ struct fw_card_driver {
41608
41609 int (*stop_iso)(struct fw_iso_context *ctx);
41610 };
41611+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
41612
41613 void fw_card_initialize(struct fw_card *card,
41614 const struct fw_card_driver *driver, struct device *device);
41615diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
41616index a66a321..f6caf20 100644
41617--- a/drivers/firewire/ohci.c
41618+++ b/drivers/firewire/ohci.c
41619@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
41620 be32_to_cpu(ohci->next_header));
41621 }
41622
41623+#ifndef CONFIG_GRKERNSEC
41624 if (param_remote_dma) {
41625 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
41626 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
41627 }
41628+#endif
41629
41630 spin_unlock_irq(&ohci->lock);
41631
41632@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
41633 unsigned long flags;
41634 int n, ret = 0;
41635
41636+#ifndef CONFIG_GRKERNSEC
41637 if (param_remote_dma)
41638 return 0;
41639+#endif
41640
41641 /*
41642 * FIXME: Make sure this bitmask is cleared when we clear the busReset
41643diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
41644index 94a58a0..f5eba42 100644
41645--- a/drivers/firmware/dmi-id.c
41646+++ b/drivers/firmware/dmi-id.c
41647@@ -16,7 +16,7 @@
41648 struct dmi_device_attribute{
41649 struct device_attribute dev_attr;
41650 int field;
41651-};
41652+} __do_const;
41653 #define to_dmi_dev_attr(_dev_attr) \
41654 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
41655
41656diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
41657index 17afc51..0ef90cd 100644
41658--- a/drivers/firmware/dmi_scan.c
41659+++ b/drivers/firmware/dmi_scan.c
41660@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
41661 if (buf == NULL)
41662 return -1;
41663
41664- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
41665+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
41666
41667 dmi_unmap(buf);
41668 return 0;
41669diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
41670index 5b53d61..72cee96 100644
41671--- a/drivers/firmware/efi/cper.c
41672+++ b/drivers/firmware/efi/cper.c
41673@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
41674 */
41675 u64 cper_next_record_id(void)
41676 {
41677- static atomic64_t seq;
41678+ static atomic64_unchecked_t seq;
41679
41680- if (!atomic64_read(&seq))
41681- atomic64_set(&seq, ((u64)get_seconds()) << 32);
41682+ if (!atomic64_read_unchecked(&seq))
41683+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
41684
41685- return atomic64_inc_return(&seq);
41686+ return atomic64_inc_return_unchecked(&seq);
41687 }
41688 EXPORT_SYMBOL_GPL(cper_next_record_id);
41689
41690diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
41691index 64ecbb5..d921eb3 100644
41692--- a/drivers/firmware/efi/efi.c
41693+++ b/drivers/firmware/efi/efi.c
41694@@ -126,14 +126,16 @@ static struct attribute_group efi_subsys_attr_group = {
41695 };
41696
41697 static struct efivars generic_efivars;
41698-static struct efivar_operations generic_ops;
41699+static efivar_operations_no_const generic_ops __read_only;
41700
41701 static int generic_ops_register(void)
41702 {
41703- generic_ops.get_variable = efi.get_variable;
41704- generic_ops.set_variable = efi.set_variable;
41705- generic_ops.get_next_variable = efi.get_next_variable;
41706- generic_ops.query_variable_store = efi_query_variable_store;
41707+ pax_open_kernel();
41708+ *(void **)&generic_ops.get_variable = efi.get_variable;
41709+ *(void **)&generic_ops.set_variable = efi.set_variable;
41710+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
41711+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
41712+ pax_close_kernel();
41713
41714 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
41715 }
41716diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
41717index f256ecd..387dcb1 100644
41718--- a/drivers/firmware/efi/efivars.c
41719+++ b/drivers/firmware/efi/efivars.c
41720@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
41721 static int
41722 create_efivars_bin_attributes(void)
41723 {
41724- struct bin_attribute *attr;
41725+ bin_attribute_no_const *attr;
41726 int error;
41727
41728 /* new_var */
41729diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
41730index 2f569aa..c95f4fb 100644
41731--- a/drivers/firmware/google/memconsole.c
41732+++ b/drivers/firmware/google/memconsole.c
41733@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
41734 if (!found_memconsole())
41735 return -ENODEV;
41736
41737- memconsole_bin_attr.size = memconsole_length;
41738+ pax_open_kernel();
41739+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
41740+ pax_close_kernel();
41741+
41742 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
41743 }
41744
41745diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
41746index fe49ec3..1ade794 100644
41747--- a/drivers/gpio/gpio-em.c
41748+++ b/drivers/gpio/gpio-em.c
41749@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
41750 struct em_gio_priv *p;
41751 struct resource *io[2], *irq[2];
41752 struct gpio_chip *gpio_chip;
41753- struct irq_chip *irq_chip;
41754+ irq_chip_no_const *irq_chip;
41755 const char *name = dev_name(&pdev->dev);
41756 int ret;
41757
41758diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41759index 3784e81..73637b5 100644
41760--- a/drivers/gpio/gpio-ich.c
41761+++ b/drivers/gpio/gpio-ich.c
41762@@ -94,7 +94,7 @@ struct ichx_desc {
41763 * this option allows driver caching written output values
41764 */
41765 bool use_outlvl_cache;
41766-};
41767+} __do_const;
41768
41769 static struct {
41770 spinlock_t lock;
41771diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41772index bf6c094..6573caf 100644
41773--- a/drivers/gpio/gpio-rcar.c
41774+++ b/drivers/gpio/gpio-rcar.c
41775@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41776 struct gpio_rcar_priv *p;
41777 struct resource *io, *irq;
41778 struct gpio_chip *gpio_chip;
41779- struct irq_chip *irq_chip;
41780+ irq_chip_no_const *irq_chip;
41781 struct device *dev = &pdev->dev;
41782 const char *name = dev_name(dev);
41783 int ret;
41784diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41785index dbf28fa..04dad4e 100644
41786--- a/drivers/gpio/gpio-vr41xx.c
41787+++ b/drivers/gpio/gpio-vr41xx.c
41788@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41789 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41790 maskl, pendl, maskh, pendh);
41791
41792- atomic_inc(&irq_err_count);
41793+ atomic_inc_unchecked(&irq_err_count);
41794
41795 return -EINVAL;
41796 }
41797diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41798index c68d037..2f4f9a9 100644
41799--- a/drivers/gpio/gpiolib.c
41800+++ b/drivers/gpio/gpiolib.c
41801@@ -529,8 +529,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41802 }
41803
41804 if (gpiochip->irqchip) {
41805- gpiochip->irqchip->irq_request_resources = NULL;
41806- gpiochip->irqchip->irq_release_resources = NULL;
41807+ pax_open_kernel();
41808+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41809+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41810+ pax_close_kernel();
41811 gpiochip->irqchip = NULL;
41812 }
41813 }
41814@@ -596,8 +598,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41815 gpiochip->irqchip = NULL;
41816 return -EINVAL;
41817 }
41818- irqchip->irq_request_resources = gpiochip_irq_reqres;
41819- irqchip->irq_release_resources = gpiochip_irq_relres;
41820+
41821+ pax_open_kernel();
41822+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41823+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41824+ pax_close_kernel();
41825
41826 /*
41827 * Prepare the mapping since the irqchip shall be orthogonal to
41828diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41829index 90e7730..3b41807 100644
41830--- a/drivers/gpu/drm/drm_crtc.c
41831+++ b/drivers/gpu/drm/drm_crtc.c
41832@@ -3861,7 +3861,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41833 goto done;
41834 }
41835
41836- if (copy_to_user(&enum_ptr[copied].name,
41837+ if (copy_to_user(enum_ptr[copied].name,
41838 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41839 ret = -EFAULT;
41840 goto done;
41841diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41842index 3242e20..7e4f621 100644
41843--- a/drivers/gpu/drm/drm_drv.c
41844+++ b/drivers/gpu/drm/drm_drv.c
41845@@ -463,7 +463,7 @@ void drm_unplug_dev(struct drm_device *dev)
41846
41847 drm_device_set_unplugged(dev);
41848
41849- if (dev->open_count == 0) {
41850+ if (local_read(&dev->open_count) == 0) {
41851 drm_put_dev(dev);
41852 }
41853 mutex_unlock(&drm_global_mutex);
41854diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41855index 79d5221..7ff73496 100644
41856--- a/drivers/gpu/drm/drm_fops.c
41857+++ b/drivers/gpu/drm/drm_fops.c
41858@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
41859 return PTR_ERR(minor);
41860
41861 dev = minor->dev;
41862- if (!dev->open_count++)
41863+ if (local_inc_return(&dev->open_count) == 1)
41864 need_setup = 1;
41865
41866 /* share address_space across all char-devs of a single device */
41867@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
41868 return 0;
41869
41870 err_undo:
41871- dev->open_count--;
41872+ local_dec(&dev->open_count);
41873 drm_minor_release(minor);
41874 return retcode;
41875 }
41876@@ -384,7 +384,7 @@ int drm_release(struct inode *inode, struct file *filp)
41877
41878 mutex_lock(&drm_global_mutex);
41879
41880- DRM_DEBUG("open_count = %d\n", dev->open_count);
41881+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41882
41883 mutex_lock(&dev->struct_mutex);
41884 list_del(&file_priv->lhead);
41885@@ -397,10 +397,10 @@ int drm_release(struct inode *inode, struct file *filp)
41886 * Begin inline drm_release
41887 */
41888
41889- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41890+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41891 task_pid_nr(current),
41892 (long)old_encode_dev(file_priv->minor->kdev->devt),
41893- dev->open_count);
41894+ local_read(&dev->open_count));
41895
41896 /* Release any auth tokens that might point to this file_priv,
41897 (do that under the drm_global_mutex) */
41898@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
41899 * End inline drm_release
41900 */
41901
41902- if (!--dev->open_count) {
41903+ if (local_dec_and_test(&dev->open_count)) {
41904 retcode = drm_lastclose(dev);
41905 if (drm_device_is_unplugged(dev))
41906 drm_put_dev(dev);
41907diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41908index 3d2e91c..d31c4c9 100644
41909--- a/drivers/gpu/drm/drm_global.c
41910+++ b/drivers/gpu/drm/drm_global.c
41911@@ -36,7 +36,7 @@
41912 struct drm_global_item {
41913 struct mutex mutex;
41914 void *object;
41915- int refcount;
41916+ atomic_t refcount;
41917 };
41918
41919 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41920@@ -49,7 +49,7 @@ void drm_global_init(void)
41921 struct drm_global_item *item = &glob[i];
41922 mutex_init(&item->mutex);
41923 item->object = NULL;
41924- item->refcount = 0;
41925+ atomic_set(&item->refcount, 0);
41926 }
41927 }
41928
41929@@ -59,7 +59,7 @@ void drm_global_release(void)
41930 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41931 struct drm_global_item *item = &glob[i];
41932 BUG_ON(item->object != NULL);
41933- BUG_ON(item->refcount != 0);
41934+ BUG_ON(atomic_read(&item->refcount) != 0);
41935 }
41936 }
41937
41938@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41939 struct drm_global_item *item = &glob[ref->global_type];
41940
41941 mutex_lock(&item->mutex);
41942- if (item->refcount == 0) {
41943+ if (atomic_read(&item->refcount) == 0) {
41944 item->object = kzalloc(ref->size, GFP_KERNEL);
41945 if (unlikely(item->object == NULL)) {
41946 ret = -ENOMEM;
41947@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41948 goto out_err;
41949
41950 }
41951- ++item->refcount;
41952+ atomic_inc(&item->refcount);
41953 ref->object = item->object;
41954 mutex_unlock(&item->mutex);
41955 return 0;
41956@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41957 struct drm_global_item *item = &glob[ref->global_type];
41958
41959 mutex_lock(&item->mutex);
41960- BUG_ON(item->refcount == 0);
41961+ BUG_ON(atomic_read(&item->refcount) == 0);
41962 BUG_ON(ref->object != item->object);
41963- if (--item->refcount == 0) {
41964+ if (atomic_dec_and_test(&item->refcount)) {
41965 ref->release(ref);
41966 item->object = NULL;
41967 }
41968diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41969index ecaf0fa..a49cee9 100644
41970--- a/drivers/gpu/drm/drm_info.c
41971+++ b/drivers/gpu/drm/drm_info.c
41972@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41973 struct drm_local_map *map;
41974 struct drm_map_list *r_list;
41975
41976- /* Hardcoded from _DRM_FRAME_BUFFER,
41977- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41978- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41979- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41980+ static const char * const types[] = {
41981+ [_DRM_FRAME_BUFFER] = "FB",
41982+ [_DRM_REGISTERS] = "REG",
41983+ [_DRM_SHM] = "SHM",
41984+ [_DRM_AGP] = "AGP",
41985+ [_DRM_SCATTER_GATHER] = "SG",
41986+ [_DRM_CONSISTENT] = "PCI"};
41987 const char *type;
41988 int i;
41989
41990@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41991 map = r_list->map;
41992 if (!map)
41993 continue;
41994- if (map->type < 0 || map->type > 5)
41995+ if (map->type >= ARRAY_SIZE(types))
41996 type = "??";
41997 else
41998 type = types[map->type];
41999@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
42000 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
42001 vma->vm_flags & VM_LOCKED ? 'l' : '-',
42002 vma->vm_flags & VM_IO ? 'i' : '-',
42003+#ifdef CONFIG_GRKERNSEC_HIDESYM
42004+ 0);
42005+#else
42006 vma->vm_pgoff);
42007+#endif
42008
42009 #if defined(__i386__)
42010 pgprot = pgprot_val(vma->vm_page_prot);
42011diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
42012index 2f4c4343..dd12cd2 100644
42013--- a/drivers/gpu/drm/drm_ioc32.c
42014+++ b/drivers/gpu/drm/drm_ioc32.c
42015@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
42016 request = compat_alloc_user_space(nbytes);
42017 if (!access_ok(VERIFY_WRITE, request, nbytes))
42018 return -EFAULT;
42019- list = (struct drm_buf_desc *) (request + 1);
42020+ list = (struct drm_buf_desc __user *) (request + 1);
42021
42022 if (__put_user(count, &request->count)
42023 || __put_user(list, &request->list))
42024@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
42025 request = compat_alloc_user_space(nbytes);
42026 if (!access_ok(VERIFY_WRITE, request, nbytes))
42027 return -EFAULT;
42028- list = (struct drm_buf_pub *) (request + 1);
42029+ list = (struct drm_buf_pub __user *) (request + 1);
42030
42031 if (__put_user(count, &request->count)
42032 || __put_user(list, &request->list))
42033@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
42034 return 0;
42035 }
42036
42037-drm_ioctl_compat_t *drm_compat_ioctls[] = {
42038+drm_ioctl_compat_t drm_compat_ioctls[] = {
42039 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
42040 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
42041 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
42042@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
42043 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42044 {
42045 unsigned int nr = DRM_IOCTL_NR(cmd);
42046- drm_ioctl_compat_t *fn;
42047 int ret;
42048
42049 /* Assume that ioctls without an explicit compat routine will just
42050@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42051 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
42052 return drm_ioctl(filp, cmd, arg);
42053
42054- fn = drm_compat_ioctls[nr];
42055-
42056- if (fn != NULL)
42057- ret = (*fn) (filp, cmd, arg);
42058+ if (drm_compat_ioctls[nr] != NULL)
42059+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
42060 else
42061 ret = drm_ioctl(filp, cmd, arg);
42062
42063diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
42064index 40be746..fd78faf 100644
42065--- a/drivers/gpu/drm/drm_ioctl.c
42066+++ b/drivers/gpu/drm/drm_ioctl.c
42067@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
42068 struct drm_file *file_priv = filp->private_data;
42069 struct drm_device *dev;
42070 const struct drm_ioctl_desc *ioctl = NULL;
42071- drm_ioctl_t *func;
42072+ drm_ioctl_no_const_t func;
42073 unsigned int nr = DRM_IOCTL_NR(cmd);
42074 int retcode = -EINVAL;
42075 char stack_kdata[128];
42076diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
42077index d4d16ed..8fb0b51 100644
42078--- a/drivers/gpu/drm/i810/i810_drv.h
42079+++ b/drivers/gpu/drm/i810/i810_drv.h
42080@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
42081 int page_flipping;
42082
42083 wait_queue_head_t irq_queue;
42084- atomic_t irq_received;
42085- atomic_t irq_emitted;
42086+ atomic_unchecked_t irq_received;
42087+ atomic_unchecked_t irq_emitted;
42088
42089 int front_offset;
42090 } drm_i810_private_t;
42091diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
42092index 9933c26..32cc097 100644
42093--- a/drivers/gpu/drm/i915/i915_dma.c
42094+++ b/drivers/gpu/drm/i915/i915_dma.c
42095@@ -1292,7 +1292,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
42096 * locking inversion with the driver load path. And the access here is
42097 * completely racy anyway. So don't bother with locking for now.
42098 */
42099- return dev->open_count == 0;
42100+ return local_read(&dev->open_count) == 0;
42101 }
42102
42103 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
42104diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42105index 60998fc..3b244bc 100644
42106--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42107+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
42108@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
42109
42110 static int
42111 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
42112- int count)
42113+ unsigned int count)
42114 {
42115- int i;
42116+ unsigned int i;
42117 unsigned relocs_total = 0;
42118 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
42119
42120diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
42121index 2e0613e..a8b94d9 100644
42122--- a/drivers/gpu/drm/i915/i915_ioc32.c
42123+++ b/drivers/gpu/drm/i915/i915_ioc32.c
42124@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
42125 (unsigned long)request);
42126 }
42127
42128-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42129+static drm_ioctl_compat_t i915_compat_ioctls[] = {
42130 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
42131 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
42132 [DRM_I915_GETPARAM] = compat_i915_getparam,
42133@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
42134 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42135 {
42136 unsigned int nr = DRM_IOCTL_NR(cmd);
42137- drm_ioctl_compat_t *fn = NULL;
42138 int ret;
42139
42140 if (nr < DRM_COMMAND_BASE)
42141 return drm_compat_ioctl(filp, cmd, arg);
42142
42143- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
42144- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42145-
42146- if (fn != NULL)
42147+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
42148+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
42149 ret = (*fn) (filp, cmd, arg);
42150- else
42151+ } else
42152 ret = drm_ioctl(filp, cmd, arg);
42153
42154 return ret;
42155diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
42156index d8324c6..fc9b704 100644
42157--- a/drivers/gpu/drm/i915/intel_display.c
42158+++ b/drivers/gpu/drm/i915/intel_display.c
42159@@ -12437,13 +12437,13 @@ struct intel_quirk {
42160 int subsystem_vendor;
42161 int subsystem_device;
42162 void (*hook)(struct drm_device *dev);
42163-};
42164+} __do_const;
42165
42166 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
42167 struct intel_dmi_quirk {
42168 void (*hook)(struct drm_device *dev);
42169 const struct dmi_system_id (*dmi_id_list)[];
42170-};
42171+} __do_const;
42172
42173 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42174 {
42175@@ -12451,18 +12451,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
42176 return 1;
42177 }
42178
42179-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42180+static const struct dmi_system_id intel_dmi_quirks_table[] = {
42181 {
42182- .dmi_id_list = &(const struct dmi_system_id[]) {
42183- {
42184- .callback = intel_dmi_reverse_brightness,
42185- .ident = "NCR Corporation",
42186- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42187- DMI_MATCH(DMI_PRODUCT_NAME, ""),
42188- },
42189- },
42190- { } /* terminating entry */
42191+ .callback = intel_dmi_reverse_brightness,
42192+ .ident = "NCR Corporation",
42193+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
42194+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
42195 },
42196+ },
42197+ { } /* terminating entry */
42198+};
42199+
42200+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
42201+ {
42202+ .dmi_id_list = &intel_dmi_quirks_table,
42203 .hook = quirk_invert_brightness,
42204 },
42205 };
42206diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
42207index fe45321..836fdca 100644
42208--- a/drivers/gpu/drm/mga/mga_drv.h
42209+++ b/drivers/gpu/drm/mga/mga_drv.h
42210@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
42211 u32 clear_cmd;
42212 u32 maccess;
42213
42214- atomic_t vbl_received; /**< Number of vblanks received. */
42215+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
42216 wait_queue_head_t fence_queue;
42217- atomic_t last_fence_retired;
42218+ atomic_unchecked_t last_fence_retired;
42219 u32 next_fence_to_post;
42220
42221 unsigned int fb_cpp;
42222diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
42223index 729bfd5..ead8823 100644
42224--- a/drivers/gpu/drm/mga/mga_ioc32.c
42225+++ b/drivers/gpu/drm/mga/mga_ioc32.c
42226@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
42227 return 0;
42228 }
42229
42230-drm_ioctl_compat_t *mga_compat_ioctls[] = {
42231+drm_ioctl_compat_t mga_compat_ioctls[] = {
42232 [DRM_MGA_INIT] = compat_mga_init,
42233 [DRM_MGA_GETPARAM] = compat_mga_getparam,
42234 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
42235@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
42236 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42237 {
42238 unsigned int nr = DRM_IOCTL_NR(cmd);
42239- drm_ioctl_compat_t *fn = NULL;
42240 int ret;
42241
42242 if (nr < DRM_COMMAND_BASE)
42243 return drm_compat_ioctl(filp, cmd, arg);
42244
42245- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
42246- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42247-
42248- if (fn != NULL)
42249+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
42250+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
42251 ret = (*fn) (filp, cmd, arg);
42252- else
42253+ } else
42254 ret = drm_ioctl(filp, cmd, arg);
42255
42256 return ret;
42257diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
42258index 1b071b8..de8601a 100644
42259--- a/drivers/gpu/drm/mga/mga_irq.c
42260+++ b/drivers/gpu/drm/mga/mga_irq.c
42261@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
42262 if (crtc != 0)
42263 return 0;
42264
42265- return atomic_read(&dev_priv->vbl_received);
42266+ return atomic_read_unchecked(&dev_priv->vbl_received);
42267 }
42268
42269
42270@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42271 /* VBLANK interrupt */
42272 if (status & MGA_VLINEPEN) {
42273 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
42274- atomic_inc(&dev_priv->vbl_received);
42275+ atomic_inc_unchecked(&dev_priv->vbl_received);
42276 drm_handle_vblank(dev, 0);
42277 handled = 1;
42278 }
42279@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
42280 if ((prim_start & ~0x03) != (prim_end & ~0x03))
42281 MGA_WRITE(MGA_PRIMEND, prim_end);
42282
42283- atomic_inc(&dev_priv->last_fence_retired);
42284+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
42285 wake_up(&dev_priv->fence_queue);
42286 handled = 1;
42287 }
42288@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
42289 * using fences.
42290 */
42291 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
42292- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
42293+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
42294 - *sequence) <= (1 << 23)));
42295
42296 *sequence = cur_fence;
42297diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
42298index dae2c96..324dbe4 100644
42299--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
42300+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
42301@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
42302 struct bit_table {
42303 const char id;
42304 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
42305-};
42306+} __no_const;
42307
42308 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
42309
42310diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
42311index b02b024..aed7bad 100644
42312--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
42313+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
42314@@ -119,7 +119,6 @@ struct nouveau_drm {
42315 struct drm_global_reference mem_global_ref;
42316 struct ttm_bo_global_ref bo_global_ref;
42317 struct ttm_bo_device bdev;
42318- atomic_t validate_sequence;
42319 int (*move)(struct nouveau_channel *,
42320 struct ttm_buffer_object *,
42321 struct ttm_mem_reg *, struct ttm_mem_reg *);
42322diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42323index 462679a..88e32a7 100644
42324--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42325+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
42326@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
42327 unsigned long arg)
42328 {
42329 unsigned int nr = DRM_IOCTL_NR(cmd);
42330- drm_ioctl_compat_t *fn = NULL;
42331+ drm_ioctl_compat_t fn = NULL;
42332 int ret;
42333
42334 if (nr < DRM_COMMAND_BASE)
42335diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42336index 53874b7..1db0a68 100644
42337--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
42338+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
42339@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42340 }
42341
42342 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
42343- nouveau_vram_manager_init,
42344- nouveau_vram_manager_fini,
42345- nouveau_vram_manager_new,
42346- nouveau_vram_manager_del,
42347- nouveau_vram_manager_debug
42348+ .init = nouveau_vram_manager_init,
42349+ .takedown = nouveau_vram_manager_fini,
42350+ .get_node = nouveau_vram_manager_new,
42351+ .put_node = nouveau_vram_manager_del,
42352+ .debug = nouveau_vram_manager_debug
42353 };
42354
42355 static int
42356@@ -196,11 +196,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42357 }
42358
42359 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
42360- nouveau_gart_manager_init,
42361- nouveau_gart_manager_fini,
42362- nouveau_gart_manager_new,
42363- nouveau_gart_manager_del,
42364- nouveau_gart_manager_debug
42365+ .init = nouveau_gart_manager_init,
42366+ .takedown = nouveau_gart_manager_fini,
42367+ .get_node = nouveau_gart_manager_new,
42368+ .put_node = nouveau_gart_manager_del,
42369+ .debug = nouveau_gart_manager_debug
42370 };
42371
42372 /*XXX*/
42373@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
42374 }
42375
42376 const struct ttm_mem_type_manager_func nv04_gart_manager = {
42377- nv04_gart_manager_init,
42378- nv04_gart_manager_fini,
42379- nv04_gart_manager_new,
42380- nv04_gart_manager_del,
42381- nv04_gart_manager_debug
42382+ .init = nv04_gart_manager_init,
42383+ .takedown = nv04_gart_manager_fini,
42384+ .get_node = nv04_gart_manager_new,
42385+ .put_node = nv04_gart_manager_del,
42386+ .debug = nv04_gart_manager_debug
42387 };
42388
42389 int
42390diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
42391index c7592ec..dd45ebc 100644
42392--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
42393+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
42394@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
42395 * locking inversion with the driver load path. And the access here is
42396 * completely racy anyway. So don't bother with locking for now.
42397 */
42398- return dev->open_count == 0;
42399+ return local_read(&dev->open_count) == 0;
42400 }
42401
42402 static const struct vga_switcheroo_client_ops
42403diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
42404index eb89653..613cf71 100644
42405--- a/drivers/gpu/drm/qxl/qxl_cmd.c
42406+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
42407@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
42408 int ret;
42409
42410 mutex_lock(&qdev->async_io_mutex);
42411- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42412+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42413 if (qdev->last_sent_io_cmd > irq_num) {
42414 if (intr)
42415 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42416- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42417+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42418 else
42419 ret = wait_event_timeout(qdev->io_cmd_event,
42420- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42421+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42422 /* 0 is timeout, just bail the "hw" has gone away */
42423 if (ret <= 0)
42424 goto out;
42425- irq_num = atomic_read(&qdev->irq_received_io_cmd);
42426+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
42427 }
42428 outb(val, addr);
42429 qdev->last_sent_io_cmd = irq_num + 1;
42430 if (intr)
42431 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
42432- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42433+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42434 else
42435 ret = wait_event_timeout(qdev->io_cmd_event,
42436- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42437+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
42438 out:
42439 if (ret > 0)
42440 ret = 0;
42441diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
42442index c3c2bbd..bc3c0fb 100644
42443--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
42444+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
42445@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
42446 struct drm_info_node *node = (struct drm_info_node *) m->private;
42447 struct qxl_device *qdev = node->minor->dev->dev_private;
42448
42449- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
42450- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
42451- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
42452- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
42453+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
42454+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
42455+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
42456+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
42457 seq_printf(m, "%d\n", qdev->irq_received_error);
42458 return 0;
42459 }
42460diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
42461index 36ed40b..0397633 100644
42462--- a/drivers/gpu/drm/qxl/qxl_drv.h
42463+++ b/drivers/gpu/drm/qxl/qxl_drv.h
42464@@ -290,10 +290,10 @@ struct qxl_device {
42465 unsigned int last_sent_io_cmd;
42466
42467 /* interrupt handling */
42468- atomic_t irq_received;
42469- atomic_t irq_received_display;
42470- atomic_t irq_received_cursor;
42471- atomic_t irq_received_io_cmd;
42472+ atomic_unchecked_t irq_received;
42473+ atomic_unchecked_t irq_received_display;
42474+ atomic_unchecked_t irq_received_cursor;
42475+ atomic_unchecked_t irq_received_io_cmd;
42476 unsigned irq_received_error;
42477 wait_queue_head_t display_event;
42478 wait_queue_head_t cursor_event;
42479diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
42480index b110883..dd06418 100644
42481--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
42482+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
42483@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42484
42485 /* TODO copy slow path code from i915 */
42486 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
42487- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
42488+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
42489
42490 {
42491 struct qxl_drawable *draw = fb_cmd;
42492@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
42493 struct drm_qxl_reloc reloc;
42494
42495 if (copy_from_user(&reloc,
42496- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
42497+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
42498 sizeof(reloc))) {
42499 ret = -EFAULT;
42500 goto out_free_bos;
42501@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
42502
42503 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
42504
42505- struct drm_qxl_command *commands =
42506- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
42507+ struct drm_qxl_command __user *commands =
42508+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
42509
42510- if (copy_from_user(&user_cmd, &commands[cmd_num],
42511+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
42512 sizeof(user_cmd)))
42513 return -EFAULT;
42514
42515diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
42516index 0bf1e20..42a7310 100644
42517--- a/drivers/gpu/drm/qxl/qxl_irq.c
42518+++ b/drivers/gpu/drm/qxl/qxl_irq.c
42519@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
42520 if (!pending)
42521 return IRQ_NONE;
42522
42523- atomic_inc(&qdev->irq_received);
42524+ atomic_inc_unchecked(&qdev->irq_received);
42525
42526 if (pending & QXL_INTERRUPT_DISPLAY) {
42527- atomic_inc(&qdev->irq_received_display);
42528+ atomic_inc_unchecked(&qdev->irq_received_display);
42529 wake_up_all(&qdev->display_event);
42530 qxl_queue_garbage_collect(qdev, false);
42531 }
42532 if (pending & QXL_INTERRUPT_CURSOR) {
42533- atomic_inc(&qdev->irq_received_cursor);
42534+ atomic_inc_unchecked(&qdev->irq_received_cursor);
42535 wake_up_all(&qdev->cursor_event);
42536 }
42537 if (pending & QXL_INTERRUPT_IO_CMD) {
42538- atomic_inc(&qdev->irq_received_io_cmd);
42539+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
42540 wake_up_all(&qdev->io_cmd_event);
42541 }
42542 if (pending & QXL_INTERRUPT_ERROR) {
42543@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
42544 init_waitqueue_head(&qdev->io_cmd_event);
42545 INIT_WORK(&qdev->client_monitors_config_work,
42546 qxl_client_monitors_config_work_func);
42547- atomic_set(&qdev->irq_received, 0);
42548- atomic_set(&qdev->irq_received_display, 0);
42549- atomic_set(&qdev->irq_received_cursor, 0);
42550- atomic_set(&qdev->irq_received_io_cmd, 0);
42551+ atomic_set_unchecked(&qdev->irq_received, 0);
42552+ atomic_set_unchecked(&qdev->irq_received_display, 0);
42553+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
42554+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
42555 qdev->irq_received_error = 0;
42556 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
42557 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
42558diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
42559index 71a1bae..cb1f103 100644
42560--- a/drivers/gpu/drm/qxl/qxl_ttm.c
42561+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
42562@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
42563 }
42564 }
42565
42566-static struct vm_operations_struct qxl_ttm_vm_ops;
42567+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
42568 static const struct vm_operations_struct *ttm_vm_ops;
42569
42570 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42571@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
42572 return r;
42573 if (unlikely(ttm_vm_ops == NULL)) {
42574 ttm_vm_ops = vma->vm_ops;
42575+ pax_open_kernel();
42576 qxl_ttm_vm_ops = *ttm_vm_ops;
42577 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
42578+ pax_close_kernel();
42579 }
42580 vma->vm_ops = &qxl_ttm_vm_ops;
42581 return 0;
42582@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
42583 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
42584 {
42585 #if defined(CONFIG_DEBUG_FS)
42586- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
42587- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
42588- unsigned i;
42589+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
42590+ {
42591+ .name = "qxl_mem_mm",
42592+ .show = &qxl_mm_dump_table,
42593+ },
42594+ {
42595+ .name = "qxl_surf_mm",
42596+ .show = &qxl_mm_dump_table,
42597+ }
42598+ };
42599
42600- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
42601- if (i == 0)
42602- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
42603- else
42604- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
42605- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
42606- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
42607- qxl_mem_types_list[i].driver_features = 0;
42608- if (i == 0)
42609- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42610- else
42611- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42612+ pax_open_kernel();
42613+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
42614+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
42615+ pax_close_kernel();
42616
42617- }
42618- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
42619+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
42620 #else
42621 return 0;
42622 #endif
42623diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
42624index 59459fe..be26b31 100644
42625--- a/drivers/gpu/drm/r128/r128_cce.c
42626+++ b/drivers/gpu/drm/r128/r128_cce.c
42627@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
42628
42629 /* GH: Simple idle check.
42630 */
42631- atomic_set(&dev_priv->idle_count, 0);
42632+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42633
42634 /* We don't support anything other than bus-mastering ring mode,
42635 * but the ring can be in either AGP or PCI space for the ring
42636diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
42637index 5bf3f5f..7000661 100644
42638--- a/drivers/gpu/drm/r128/r128_drv.h
42639+++ b/drivers/gpu/drm/r128/r128_drv.h
42640@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
42641 int is_pci;
42642 unsigned long cce_buffers_offset;
42643
42644- atomic_t idle_count;
42645+ atomic_unchecked_t idle_count;
42646
42647 int page_flipping;
42648 int current_page;
42649 u32 crtc_offset;
42650 u32 crtc_offset_cntl;
42651
42652- atomic_t vbl_received;
42653+ atomic_unchecked_t vbl_received;
42654
42655 u32 color_fmt;
42656 unsigned int front_offset;
42657diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
42658index 663f38c..c689495 100644
42659--- a/drivers/gpu/drm/r128/r128_ioc32.c
42660+++ b/drivers/gpu/drm/r128/r128_ioc32.c
42661@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
42662 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
42663 }
42664
42665-drm_ioctl_compat_t *r128_compat_ioctls[] = {
42666+drm_ioctl_compat_t r128_compat_ioctls[] = {
42667 [DRM_R128_INIT] = compat_r128_init,
42668 [DRM_R128_DEPTH] = compat_r128_depth,
42669 [DRM_R128_STIPPLE] = compat_r128_stipple,
42670@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
42671 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42672 {
42673 unsigned int nr = DRM_IOCTL_NR(cmd);
42674- drm_ioctl_compat_t *fn = NULL;
42675 int ret;
42676
42677 if (nr < DRM_COMMAND_BASE)
42678 return drm_compat_ioctl(filp, cmd, arg);
42679
42680- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
42681- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42682-
42683- if (fn != NULL)
42684+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
42685+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
42686 ret = (*fn) (filp, cmd, arg);
42687- else
42688+ } else
42689 ret = drm_ioctl(filp, cmd, arg);
42690
42691 return ret;
42692diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
42693index c2ae496..30b5993 100644
42694--- a/drivers/gpu/drm/r128/r128_irq.c
42695+++ b/drivers/gpu/drm/r128/r128_irq.c
42696@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
42697 if (crtc != 0)
42698 return 0;
42699
42700- return atomic_read(&dev_priv->vbl_received);
42701+ return atomic_read_unchecked(&dev_priv->vbl_received);
42702 }
42703
42704 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42705@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
42706 /* VBLANK interrupt */
42707 if (status & R128_CRTC_VBLANK_INT) {
42708 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
42709- atomic_inc(&dev_priv->vbl_received);
42710+ atomic_inc_unchecked(&dev_priv->vbl_received);
42711 drm_handle_vblank(dev, 0);
42712 return IRQ_HANDLED;
42713 }
42714diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
42715index 575e986..66e62ca 100644
42716--- a/drivers/gpu/drm/r128/r128_state.c
42717+++ b/drivers/gpu/drm/r128/r128_state.c
42718@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
42719
42720 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
42721 {
42722- if (atomic_read(&dev_priv->idle_count) == 0)
42723+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
42724 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
42725 else
42726- atomic_set(&dev_priv->idle_count, 0);
42727+ atomic_set_unchecked(&dev_priv->idle_count, 0);
42728 }
42729
42730 #endif
42731diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
42732index 4a85bb6..aaea819 100644
42733--- a/drivers/gpu/drm/radeon/mkregtable.c
42734+++ b/drivers/gpu/drm/radeon/mkregtable.c
42735@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
42736 regex_t mask_rex;
42737 regmatch_t match[4];
42738 char buf[1024];
42739- size_t end;
42740+ long end;
42741 int len;
42742 int done = 0;
42743 int r;
42744 unsigned o;
42745 struct offset *offset;
42746 char last_reg_s[10];
42747- int last_reg;
42748+ unsigned long last_reg;
42749
42750 if (regcomp
42751 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
42752diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42753index 12c8329..a69e2e8 100644
42754--- a/drivers/gpu/drm/radeon/radeon_device.c
42755+++ b/drivers/gpu/drm/radeon/radeon_device.c
42756@@ -1213,7 +1213,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42757 * locking inversion with the driver load path. And the access here is
42758 * completely racy anyway. So don't bother with locking for now.
42759 */
42760- return dev->open_count == 0;
42761+ return local_read(&dev->open_count) == 0;
42762 }
42763
42764 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42765diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42766index dafd812..1bf20c7 100644
42767--- a/drivers/gpu/drm/radeon/radeon_drv.h
42768+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42769@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42770
42771 /* SW interrupt */
42772 wait_queue_head_t swi_queue;
42773- atomic_t swi_emitted;
42774+ atomic_unchecked_t swi_emitted;
42775 int vblank_crtc;
42776 uint32_t irq_enable_reg;
42777 uint32_t r500_disp_irq_reg;
42778diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42779index 0b98ea1..0881827 100644
42780--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42781+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42782@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42783 request = compat_alloc_user_space(sizeof(*request));
42784 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42785 || __put_user(req32.param, &request->param)
42786- || __put_user((void __user *)(unsigned long)req32.value,
42787+ || __put_user((unsigned long)req32.value,
42788 &request->value))
42789 return -EFAULT;
42790
42791@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42792 #define compat_radeon_cp_setparam NULL
42793 #endif /* X86_64 || IA64 */
42794
42795-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42796+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42797 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42798 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42799 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42800@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42801 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42802 {
42803 unsigned int nr = DRM_IOCTL_NR(cmd);
42804- drm_ioctl_compat_t *fn = NULL;
42805 int ret;
42806
42807 if (nr < DRM_COMMAND_BASE)
42808 return drm_compat_ioctl(filp, cmd, arg);
42809
42810- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42811- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42812-
42813- if (fn != NULL)
42814+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42815+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42816 ret = (*fn) (filp, cmd, arg);
42817- else
42818+ } else
42819 ret = drm_ioctl(filp, cmd, arg);
42820
42821 return ret;
42822diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42823index 244b19b..c19226d 100644
42824--- a/drivers/gpu/drm/radeon/radeon_irq.c
42825+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42826@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42827 unsigned int ret;
42828 RING_LOCALS;
42829
42830- atomic_inc(&dev_priv->swi_emitted);
42831- ret = atomic_read(&dev_priv->swi_emitted);
42832+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42833+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42834
42835 BEGIN_RING(4);
42836 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42837@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42838 drm_radeon_private_t *dev_priv =
42839 (drm_radeon_private_t *) dev->dev_private;
42840
42841- atomic_set(&dev_priv->swi_emitted, 0);
42842+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42843 init_waitqueue_head(&dev_priv->swi_queue);
42844
42845 dev->max_vblank_count = 0x001fffff;
42846diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42847index 23bb64f..69d7234 100644
42848--- a/drivers/gpu/drm/radeon/radeon_state.c
42849+++ b/drivers/gpu/drm/radeon/radeon_state.c
42850@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42851 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42852 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42853
42854- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42855+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42856 sarea_priv->nbox * sizeof(depth_boxes[0])))
42857 return -EFAULT;
42858
42859@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42860 {
42861 drm_radeon_private_t *dev_priv = dev->dev_private;
42862 drm_radeon_getparam_t *param = data;
42863- int value;
42864+ int value = 0;
42865
42866 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42867
42868diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42869index 72afe82..056a57a 100644
42870--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42871+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42872@@ -801,7 +801,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42873 man->size = size >> PAGE_SHIFT;
42874 }
42875
42876-static struct vm_operations_struct radeon_ttm_vm_ops;
42877+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42878 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42879
42880 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42881@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42882 }
42883 if (unlikely(ttm_vm_ops == NULL)) {
42884 ttm_vm_ops = vma->vm_ops;
42885+ pax_open_kernel();
42886 radeon_ttm_vm_ops = *ttm_vm_ops;
42887 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42888+ pax_close_kernel();
42889 }
42890 vma->vm_ops = &radeon_ttm_vm_ops;
42891 return 0;
42892diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42893index 6553fd2..aecd29c 100644
42894--- a/drivers/gpu/drm/tegra/dc.c
42895+++ b/drivers/gpu/drm/tegra/dc.c
42896@@ -1243,7 +1243,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42897 }
42898
42899 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42900- dc->debugfs_files[i].data = dc;
42901+ *(void **)&dc->debugfs_files[i].data = dc;
42902
42903 err = drm_debugfs_create_files(dc->debugfs_files,
42904 ARRAY_SIZE(debugfs_files),
42905diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42906index f787445..2df2c65 100644
42907--- a/drivers/gpu/drm/tegra/dsi.c
42908+++ b/drivers/gpu/drm/tegra/dsi.c
42909@@ -41,7 +41,7 @@ struct tegra_dsi {
42910 struct clk *clk_lp;
42911 struct clk *clk;
42912
42913- struct drm_info_list *debugfs_files;
42914+ drm_info_list_no_const *debugfs_files;
42915 struct drm_minor *minor;
42916 struct dentry *debugfs;
42917
42918diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42919index ffe2654..03c7b1c 100644
42920--- a/drivers/gpu/drm/tegra/hdmi.c
42921+++ b/drivers/gpu/drm/tegra/hdmi.c
42922@@ -60,7 +60,7 @@ struct tegra_hdmi {
42923 bool stereo;
42924 bool dvi;
42925
42926- struct drm_info_list *debugfs_files;
42927+ drm_info_list_no_const *debugfs_files;
42928 struct drm_minor *minor;
42929 struct dentry *debugfs;
42930 };
42931diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42932index 9e103a48..0e117f3 100644
42933--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42934+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42935@@ -147,10 +147,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42936 }
42937
42938 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42939- ttm_bo_man_init,
42940- ttm_bo_man_takedown,
42941- ttm_bo_man_get_node,
42942- ttm_bo_man_put_node,
42943- ttm_bo_man_debug
42944+ .init = ttm_bo_man_init,
42945+ .takedown = ttm_bo_man_takedown,
42946+ .get_node = ttm_bo_man_get_node,
42947+ .put_node = ttm_bo_man_put_node,
42948+ .debug = ttm_bo_man_debug
42949 };
42950 EXPORT_SYMBOL(ttm_bo_manager_func);
42951diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42952index dbc2def..0a9f710 100644
42953--- a/drivers/gpu/drm/ttm/ttm_memory.c
42954+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42955@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42956 zone->glob = glob;
42957 glob->zone_kernel = zone;
42958 ret = kobject_init_and_add(
42959- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42960+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42961 if (unlikely(ret != 0)) {
42962 kobject_put(&zone->kobj);
42963 return ret;
42964@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42965 zone->glob = glob;
42966 glob->zone_dma32 = zone;
42967 ret = kobject_init_and_add(
42968- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42969+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42970 if (unlikely(ret != 0)) {
42971 kobject_put(&zone->kobj);
42972 return ret;
42973diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42974index d1da339..829235e 100644
42975--- a/drivers/gpu/drm/udl/udl_fb.c
42976+++ b/drivers/gpu/drm/udl/udl_fb.c
42977@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42978 fb_deferred_io_cleanup(info);
42979 kfree(info->fbdefio);
42980 info->fbdefio = NULL;
42981- info->fbops->fb_mmap = udl_fb_mmap;
42982 }
42983
42984 pr_warn("released /dev/fb%d user=%d count=%d\n",
42985diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42986index ad02732..144f5ed 100644
42987--- a/drivers/gpu/drm/via/via_drv.h
42988+++ b/drivers/gpu/drm/via/via_drv.h
42989@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
42990 typedef uint32_t maskarray_t[5];
42991
42992 typedef struct drm_via_irq {
42993- atomic_t irq_received;
42994+ atomic_unchecked_t irq_received;
42995 uint32_t pending_mask;
42996 uint32_t enable_mask;
42997 wait_queue_head_t irq_queue;
42998@@ -75,7 +75,7 @@ typedef struct drm_via_private {
42999 struct timeval last_vblank;
43000 int last_vblank_valid;
43001 unsigned usec_per_vblank;
43002- atomic_t vbl_received;
43003+ atomic_unchecked_t vbl_received;
43004 drm_via_state_t hc_state;
43005 char pci_buf[VIA_PCI_BUF_SIZE];
43006 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
43007diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
43008index 1319433..a993b0c 100644
43009--- a/drivers/gpu/drm/via/via_irq.c
43010+++ b/drivers/gpu/drm/via/via_irq.c
43011@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
43012 if (crtc != 0)
43013 return 0;
43014
43015- return atomic_read(&dev_priv->vbl_received);
43016+ return atomic_read_unchecked(&dev_priv->vbl_received);
43017 }
43018
43019 irqreturn_t via_driver_irq_handler(int irq, void *arg)
43020@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43021
43022 status = VIA_READ(VIA_REG_INTERRUPT);
43023 if (status & VIA_IRQ_VBLANK_PENDING) {
43024- atomic_inc(&dev_priv->vbl_received);
43025- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
43026+ atomic_inc_unchecked(&dev_priv->vbl_received);
43027+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
43028 do_gettimeofday(&cur_vblank);
43029 if (dev_priv->last_vblank_valid) {
43030 dev_priv->usec_per_vblank =
43031@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43032 dev_priv->last_vblank = cur_vblank;
43033 dev_priv->last_vblank_valid = 1;
43034 }
43035- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
43036+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
43037 DRM_DEBUG("US per vblank is: %u\n",
43038 dev_priv->usec_per_vblank);
43039 }
43040@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
43041
43042 for (i = 0; i < dev_priv->num_irqs; ++i) {
43043 if (status & cur_irq->pending_mask) {
43044- atomic_inc(&cur_irq->irq_received);
43045+ atomic_inc_unchecked(&cur_irq->irq_received);
43046 wake_up(&cur_irq->irq_queue);
43047 handled = 1;
43048 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
43049@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
43050 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43051 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
43052 masks[irq][4]));
43053- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
43054+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
43055 } else {
43056 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
43057 (((cur_irq_sequence =
43058- atomic_read(&cur_irq->irq_received)) -
43059+ atomic_read_unchecked(&cur_irq->irq_received)) -
43060 *sequence) <= (1 << 23)));
43061 }
43062 *sequence = cur_irq_sequence;
43063@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
43064 }
43065
43066 for (i = 0; i < dev_priv->num_irqs; ++i) {
43067- atomic_set(&cur_irq->irq_received, 0);
43068+ atomic_set_unchecked(&cur_irq->irq_received, 0);
43069 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
43070 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
43071 init_waitqueue_head(&cur_irq->irq_queue);
43072@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
43073 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
43074 case VIA_IRQ_RELATIVE:
43075 irqwait->request.sequence +=
43076- atomic_read(&cur_irq->irq_received);
43077+ atomic_read_unchecked(&cur_irq->irq_received);
43078 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
43079 case VIA_IRQ_ABSOLUTE:
43080 break;
43081diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43082index 99f7317..33a835b 100644
43083--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43084+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
43085@@ -447,7 +447,7 @@ struct vmw_private {
43086 * Fencing and IRQs.
43087 */
43088
43089- atomic_t marker_seq;
43090+ atomic_unchecked_t marker_seq;
43091 wait_queue_head_t fence_queue;
43092 wait_queue_head_t fifo_queue;
43093 int fence_queue_waiters; /* Protected by hw_mutex */
43094diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43095index 6eae14d..aa311b3 100644
43096--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43097+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
43098@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
43099 (unsigned int) min,
43100 (unsigned int) fifo->capabilities);
43101
43102- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43103+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
43104 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
43105 vmw_marker_queue_init(&fifo->marker_queue);
43106 return vmw_fifo_send_fence(dev_priv, &dummy);
43107@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
43108 if (reserveable)
43109 iowrite32(bytes, fifo_mem +
43110 SVGA_FIFO_RESERVED);
43111- return fifo_mem + (next_cmd >> 2);
43112+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
43113 } else {
43114 need_bounce = true;
43115 }
43116@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43117
43118 fm = vmw_fifo_reserve(dev_priv, bytes);
43119 if (unlikely(fm == NULL)) {
43120- *seqno = atomic_read(&dev_priv->marker_seq);
43121+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43122 ret = -ENOMEM;
43123 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
43124 false, 3*HZ);
43125@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
43126 }
43127
43128 do {
43129- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
43130+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
43131 } while (*seqno == 0);
43132
43133 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
43134diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43135index 26f8bdd..90a0008 100644
43136--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43137+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
43138@@ -165,9 +165,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
43139 }
43140
43141 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
43142- vmw_gmrid_man_init,
43143- vmw_gmrid_man_takedown,
43144- vmw_gmrid_man_get_node,
43145- vmw_gmrid_man_put_node,
43146- vmw_gmrid_man_debug
43147+ .init = vmw_gmrid_man_init,
43148+ .takedown = vmw_gmrid_man_takedown,
43149+ .get_node = vmw_gmrid_man_get_node,
43150+ .put_node = vmw_gmrid_man_put_node,
43151+ .debug = vmw_gmrid_man_debug
43152 };
43153diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43154index 37881ec..319065d 100644
43155--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43156+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
43157@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
43158 int ret;
43159
43160 num_clips = arg->num_clips;
43161- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43162+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43163
43164 if (unlikely(num_clips == 0))
43165 return 0;
43166@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
43167 int ret;
43168
43169 num_clips = arg->num_clips;
43170- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
43171+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
43172
43173 if (unlikely(num_clips == 0))
43174 return 0;
43175diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43176index 0c42376..6febe77 100644
43177--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43178+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
43179@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
43180 * emitted. Then the fence is stale and signaled.
43181 */
43182
43183- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
43184+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
43185 > VMW_FENCE_WRAP);
43186
43187 return ret;
43188@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
43189
43190 if (fifo_idle)
43191 down_read(&fifo_state->rwsem);
43192- signal_seq = atomic_read(&dev_priv->marker_seq);
43193+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
43194 ret = 0;
43195
43196 for (;;) {
43197diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43198index efd1ffd..0ae13ca 100644
43199--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43200+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
43201@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
43202 while (!vmw_lag_lt(queue, us)) {
43203 spin_lock(&queue->lock);
43204 if (list_empty(&queue->head))
43205- seqno = atomic_read(&dev_priv->marker_seq);
43206+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
43207 else {
43208 marker = list_first_entry(&queue->head,
43209 struct vmw_marker, head);
43210diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
43211index 37ac7b5..d52a5c9 100644
43212--- a/drivers/gpu/vga/vga_switcheroo.c
43213+++ b/drivers/gpu/vga/vga_switcheroo.c
43214@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
43215
43216 /* this version is for the case where the power switch is separate
43217 to the device being powered down. */
43218-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
43219+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
43220 {
43221 /* copy over all the bus versions */
43222 if (dev->bus && dev->bus->pm) {
43223@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
43224 return ret;
43225 }
43226
43227-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
43228+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
43229 {
43230 /* copy over all the bus versions */
43231 if (dev->bus && dev->bus->pm) {
43232diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
43233index 12b6e67..ddd983c 100644
43234--- a/drivers/hid/hid-core.c
43235+++ b/drivers/hid/hid-core.c
43236@@ -2500,7 +2500,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
43237
43238 int hid_add_device(struct hid_device *hdev)
43239 {
43240- static atomic_t id = ATOMIC_INIT(0);
43241+ static atomic_unchecked_t id = ATOMIC_INIT(0);
43242 int ret;
43243
43244 if (WARN_ON(hdev->status & HID_STAT_ADDED))
43245@@ -2542,7 +2542,7 @@ int hid_add_device(struct hid_device *hdev)
43246 /* XXX hack, any other cleaner solution after the driver core
43247 * is converted to allow more than 20 bytes as the device name? */
43248 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
43249- hdev->vendor, hdev->product, atomic_inc_return(&id));
43250+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
43251
43252 hid_debug_register(hdev, dev_name(&hdev->dev));
43253 ret = device_add(&hdev->dev);
43254diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
43255index 9bf8637..f462416 100644
43256--- a/drivers/hid/hid-logitech-dj.c
43257+++ b/drivers/hid/hid-logitech-dj.c
43258@@ -682,6 +682,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
43259 * device (via hid_input_report() ) and return 1 so hid-core does not do
43260 * anything else with it.
43261 */
43262+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
43263+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
43264+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
43265+ __func__, dj_report->device_index);
43266+ return false;
43267+ }
43268
43269 /* case 1) */
43270 if (data[0] != REPORT_ID_DJ_SHORT)
43271diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
43272index c13fb5b..55a3802 100644
43273--- a/drivers/hid/hid-wiimote-debug.c
43274+++ b/drivers/hid/hid-wiimote-debug.c
43275@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
43276 else if (size == 0)
43277 return -EIO;
43278
43279- if (copy_to_user(u, buf, size))
43280+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
43281 return -EFAULT;
43282
43283 *off += size;
43284diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
43285index 0cb92e3..c7d453d 100644
43286--- a/drivers/hid/uhid.c
43287+++ b/drivers/hid/uhid.c
43288@@ -47,7 +47,7 @@ struct uhid_device {
43289 struct mutex report_lock;
43290 wait_queue_head_t report_wait;
43291 atomic_t report_done;
43292- atomic_t report_id;
43293+ atomic_unchecked_t report_id;
43294 struct uhid_event report_buf;
43295 };
43296
43297@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
43298
43299 spin_lock_irqsave(&uhid->qlock, flags);
43300 ev->type = UHID_FEATURE;
43301- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
43302+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
43303 ev->u.feature.rnum = rnum;
43304 ev->u.feature.rtype = report_type;
43305
43306@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
43307 spin_lock_irqsave(&uhid->qlock, flags);
43308
43309 /* id for old report; drop it silently */
43310- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
43311+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
43312 goto unlock;
43313 if (atomic_read(&uhid->report_done))
43314 goto unlock;
43315diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
43316index 19bad59..ca24eaf 100644
43317--- a/drivers/hv/channel.c
43318+++ b/drivers/hv/channel.c
43319@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
43320 unsigned long flags;
43321 int ret = 0;
43322
43323- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
43324- atomic_inc(&vmbus_connection.next_gpadl_handle);
43325+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
43326+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
43327
43328 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
43329 if (ret)
43330diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
43331index 3e4235c..877d0e5 100644
43332--- a/drivers/hv/hv.c
43333+++ b/drivers/hv/hv.c
43334@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
43335 u64 output_address = (output) ? virt_to_phys(output) : 0;
43336 u32 output_address_hi = output_address >> 32;
43337 u32 output_address_lo = output_address & 0xFFFFFFFF;
43338- void *hypercall_page = hv_context.hypercall_page;
43339+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
43340
43341 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
43342 "=a"(hv_status_lo) : "d" (control_hi),
43343@@ -156,7 +156,7 @@ int hv_init(void)
43344 /* See if the hypercall page is already set */
43345 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
43346
43347- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
43348+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
43349
43350 if (!virtaddr)
43351 goto cleanup;
43352diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
43353index 5e90c5d..d8fcefb 100644
43354--- a/drivers/hv/hv_balloon.c
43355+++ b/drivers/hv/hv_balloon.c
43356@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
43357
43358 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
43359 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
43360-static atomic_t trans_id = ATOMIC_INIT(0);
43361+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
43362
43363 static int dm_ring_size = (5 * PAGE_SIZE);
43364
43365@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
43366 pr_info("Memory hot add failed\n");
43367
43368 dm->state = DM_INITIALIZED;
43369- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43370+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43371 vmbus_sendpacket(dm->dev->channel, &resp,
43372 sizeof(struct dm_hot_add_response),
43373 (unsigned long)NULL,
43374@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
43375 memset(&status, 0, sizeof(struct dm_status));
43376 status.hdr.type = DM_STATUS_REPORT;
43377 status.hdr.size = sizeof(struct dm_status);
43378- status.hdr.trans_id = atomic_inc_return(&trans_id);
43379+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43380
43381 /*
43382 * The host expects the guest to report free memory.
43383@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
43384 * send the status. This can happen if we were interrupted
43385 * after we picked our transaction ID.
43386 */
43387- if (status.hdr.trans_id != atomic_read(&trans_id))
43388+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
43389 return;
43390
43391 /*
43392@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
43393 */
43394
43395 do {
43396- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
43397+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43398 ret = vmbus_sendpacket(dm_device.dev->channel,
43399 bl_resp,
43400 bl_resp->hdr.size,
43401@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
43402
43403 memset(&resp, 0, sizeof(struct dm_unballoon_response));
43404 resp.hdr.type = DM_UNBALLOON_RESPONSE;
43405- resp.hdr.trans_id = atomic_inc_return(&trans_id);
43406+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43407 resp.hdr.size = sizeof(struct dm_unballoon_response);
43408
43409 vmbus_sendpacket(dm_device.dev->channel, &resp,
43410@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
43411 memset(&version_req, 0, sizeof(struct dm_version_request));
43412 version_req.hdr.type = DM_VERSION_REQUEST;
43413 version_req.hdr.size = sizeof(struct dm_version_request);
43414- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43415+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43416 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
43417 version_req.is_last_attempt = 1;
43418
43419@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
43420 memset(&version_req, 0, sizeof(struct dm_version_request));
43421 version_req.hdr.type = DM_VERSION_REQUEST;
43422 version_req.hdr.size = sizeof(struct dm_version_request);
43423- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
43424+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43425 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
43426 version_req.is_last_attempt = 0;
43427
43428@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
43429 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
43430 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
43431 cap_msg.hdr.size = sizeof(struct dm_capabilities);
43432- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
43433+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
43434
43435 cap_msg.caps.cap_bits.balloon = 1;
43436 cap_msg.caps.cap_bits.hot_add = 1;
43437diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
43438index c386d8d..d6004c4 100644
43439--- a/drivers/hv/hyperv_vmbus.h
43440+++ b/drivers/hv/hyperv_vmbus.h
43441@@ -611,7 +611,7 @@ enum vmbus_connect_state {
43442 struct vmbus_connection {
43443 enum vmbus_connect_state conn_state;
43444
43445- atomic_t next_gpadl_handle;
43446+ atomic_unchecked_t next_gpadl_handle;
43447
43448 /*
43449 * Represents channel interrupts. Each bit position represents a
43450diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
43451index 4d6b269..2e23b86 100644
43452--- a/drivers/hv/vmbus_drv.c
43453+++ b/drivers/hv/vmbus_drv.c
43454@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
43455 {
43456 int ret = 0;
43457
43458- static atomic_t device_num = ATOMIC_INIT(0);
43459+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
43460
43461 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
43462- atomic_inc_return(&device_num));
43463+ atomic_inc_return_unchecked(&device_num));
43464
43465 child_device_obj->device.bus = &hv_bus;
43466 child_device_obj->device.parent = &hv_acpi_dev->dev;
43467diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
43468index 579bdf9..75118b5 100644
43469--- a/drivers/hwmon/acpi_power_meter.c
43470+++ b/drivers/hwmon/acpi_power_meter.c
43471@@ -116,7 +116,7 @@ struct sensor_template {
43472 struct device_attribute *devattr,
43473 const char *buf, size_t count);
43474 int index;
43475-};
43476+} __do_const;
43477
43478 /* Averaging interval */
43479 static int update_avg_interval(struct acpi_power_meter_resource *resource)
43480@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
43481 struct sensor_template *attrs)
43482 {
43483 struct device *dev = &resource->acpi_dev->dev;
43484- struct sensor_device_attribute *sensors =
43485+ sensor_device_attribute_no_const *sensors =
43486 &resource->sensors[resource->num_sensors];
43487 int res = 0;
43488
43489diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
43490index 3288f13..71cfb4e 100644
43491--- a/drivers/hwmon/applesmc.c
43492+++ b/drivers/hwmon/applesmc.c
43493@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
43494 {
43495 struct applesmc_node_group *grp;
43496 struct applesmc_dev_attr *node;
43497- struct attribute *attr;
43498+ attribute_no_const *attr;
43499 int ret, i;
43500
43501 for (grp = groups; grp->format; grp++) {
43502diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
43503index cccef87..06ce8ec 100644
43504--- a/drivers/hwmon/asus_atk0110.c
43505+++ b/drivers/hwmon/asus_atk0110.c
43506@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
43507 struct atk_sensor_data {
43508 struct list_head list;
43509 struct atk_data *data;
43510- struct device_attribute label_attr;
43511- struct device_attribute input_attr;
43512- struct device_attribute limit1_attr;
43513- struct device_attribute limit2_attr;
43514+ device_attribute_no_const label_attr;
43515+ device_attribute_no_const input_attr;
43516+ device_attribute_no_const limit1_attr;
43517+ device_attribute_no_const limit2_attr;
43518 char label_attr_name[ATTR_NAME_SIZE];
43519 char input_attr_name[ATTR_NAME_SIZE];
43520 char limit1_attr_name[ATTR_NAME_SIZE];
43521@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
43522 static struct device_attribute atk_name_attr =
43523 __ATTR(name, 0444, atk_name_show, NULL);
43524
43525-static void atk_init_attribute(struct device_attribute *attr, char *name,
43526+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
43527 sysfs_show_func show)
43528 {
43529 sysfs_attr_init(&attr->attr);
43530diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
43531index d76f0b7..55ae976 100644
43532--- a/drivers/hwmon/coretemp.c
43533+++ b/drivers/hwmon/coretemp.c
43534@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
43535 return NOTIFY_OK;
43536 }
43537
43538-static struct notifier_block coretemp_cpu_notifier __refdata = {
43539+static struct notifier_block coretemp_cpu_notifier = {
43540 .notifier_call = coretemp_cpu_callback,
43541 };
43542
43543diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
43544index 7a8a6fb..015c1fd 100644
43545--- a/drivers/hwmon/ibmaem.c
43546+++ b/drivers/hwmon/ibmaem.c
43547@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
43548 struct aem_rw_sensor_template *rw)
43549 {
43550 struct device *dev = &data->pdev->dev;
43551- struct sensor_device_attribute *sensors = data->sensors;
43552+ sensor_device_attribute_no_const *sensors = data->sensors;
43553 int err;
43554
43555 /* Set up read-only sensors */
43556diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
43557index 14c82da..09b25d7 100644
43558--- a/drivers/hwmon/iio_hwmon.c
43559+++ b/drivers/hwmon/iio_hwmon.c
43560@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
43561 {
43562 struct device *dev = &pdev->dev;
43563 struct iio_hwmon_state *st;
43564- struct sensor_device_attribute *a;
43565+ sensor_device_attribute_no_const *a;
43566 int ret, i;
43567 int in_i = 1, temp_i = 1, curr_i = 1;
43568 enum iio_chan_type type;
43569diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
43570index 7710f46..427a28d 100644
43571--- a/drivers/hwmon/nct6683.c
43572+++ b/drivers/hwmon/nct6683.c
43573@@ -397,11 +397,11 @@ static struct attribute_group *
43574 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43575 int repeat)
43576 {
43577- struct sensor_device_attribute_2 *a2;
43578- struct sensor_device_attribute *a;
43579+ sensor_device_attribute_2_no_const *a2;
43580+ sensor_device_attribute_no_const *a;
43581 struct sensor_device_template **t;
43582 struct sensor_device_attr_u *su;
43583- struct attribute_group *group;
43584+ attribute_group_no_const *group;
43585 struct attribute **attrs;
43586 int i, j, count;
43587
43588diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
43589index 504cbdd..35d6f25 100644
43590--- a/drivers/hwmon/nct6775.c
43591+++ b/drivers/hwmon/nct6775.c
43592@@ -943,10 +943,10 @@ static struct attribute_group *
43593 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
43594 int repeat)
43595 {
43596- struct attribute_group *group;
43597+ attribute_group_no_const *group;
43598 struct sensor_device_attr_u *su;
43599- struct sensor_device_attribute *a;
43600- struct sensor_device_attribute_2 *a2;
43601+ sensor_device_attribute_no_const *a;
43602+ sensor_device_attribute_2_no_const *a2;
43603 struct attribute **attrs;
43604 struct sensor_device_template **t;
43605 int i, count;
43606diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
43607index 291d11f..3f0dbbd 100644
43608--- a/drivers/hwmon/pmbus/pmbus_core.c
43609+++ b/drivers/hwmon/pmbus/pmbus_core.c
43610@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
43611 return 0;
43612 }
43613
43614-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43615+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
43616 const char *name,
43617 umode_t mode,
43618 ssize_t (*show)(struct device *dev,
43619@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
43620 dev_attr->store = store;
43621 }
43622
43623-static void pmbus_attr_init(struct sensor_device_attribute *a,
43624+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
43625 const char *name,
43626 umode_t mode,
43627 ssize_t (*show)(struct device *dev,
43628@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
43629 u16 reg, u8 mask)
43630 {
43631 struct pmbus_boolean *boolean;
43632- struct sensor_device_attribute *a;
43633+ sensor_device_attribute_no_const *a;
43634
43635 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43636 if (!boolean)
43637@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43638 bool update, bool readonly)
43639 {
43640 struct pmbus_sensor *sensor;
43641- struct device_attribute *a;
43642+ device_attribute_no_const *a;
43643
43644 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43645 if (!sensor)
43646@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43647 const char *lstring, int index)
43648 {
43649 struct pmbus_label *label;
43650- struct device_attribute *a;
43651+ device_attribute_no_const *a;
43652
43653 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43654 if (!label)
43655diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43656index 97cd45a..ac54d8b 100644
43657--- a/drivers/hwmon/sht15.c
43658+++ b/drivers/hwmon/sht15.c
43659@@ -169,7 +169,7 @@ struct sht15_data {
43660 int supply_uv;
43661 bool supply_uv_valid;
43662 struct work_struct update_supply_work;
43663- atomic_t interrupt_handled;
43664+ atomic_unchecked_t interrupt_handled;
43665 };
43666
43667 /**
43668@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43669 ret = gpio_direction_input(data->pdata->gpio_data);
43670 if (ret)
43671 return ret;
43672- atomic_set(&data->interrupt_handled, 0);
43673+ atomic_set_unchecked(&data->interrupt_handled, 0);
43674
43675 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43676 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43677 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43678 /* Only relevant if the interrupt hasn't occurred. */
43679- if (!atomic_read(&data->interrupt_handled))
43680+ if (!atomic_read_unchecked(&data->interrupt_handled))
43681 schedule_work(&data->read_work);
43682 }
43683 ret = wait_event_timeout(data->wait_queue,
43684@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43685
43686 /* First disable the interrupt */
43687 disable_irq_nosync(irq);
43688- atomic_inc(&data->interrupt_handled);
43689+ atomic_inc_unchecked(&data->interrupt_handled);
43690 /* Then schedule a reading work struct */
43691 if (data->state != SHT15_READING_NOTHING)
43692 schedule_work(&data->read_work);
43693@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43694 * If not, then start the interrupt again - care here as could
43695 * have gone low in meantime so verify it hasn't!
43696 */
43697- atomic_set(&data->interrupt_handled, 0);
43698+ atomic_set_unchecked(&data->interrupt_handled, 0);
43699 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43700 /* If still not occurred or another handler was scheduled */
43701 if (gpio_get_value(data->pdata->gpio_data)
43702- || atomic_read(&data->interrupt_handled))
43703+ || atomic_read_unchecked(&data->interrupt_handled))
43704 return;
43705 }
43706
43707diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43708index 8df43c5..b07b91d 100644
43709--- a/drivers/hwmon/via-cputemp.c
43710+++ b/drivers/hwmon/via-cputemp.c
43711@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43712 return NOTIFY_OK;
43713 }
43714
43715-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43716+static struct notifier_block via_cputemp_cpu_notifier = {
43717 .notifier_call = via_cputemp_cpu_callback,
43718 };
43719
43720diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43721index 41fc683..a39cfea 100644
43722--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43723+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43724@@ -43,7 +43,7 @@
43725 extern struct i2c_adapter amd756_smbus;
43726
43727 static struct i2c_adapter *s4882_adapter;
43728-static struct i2c_algorithm *s4882_algo;
43729+static i2c_algorithm_no_const *s4882_algo;
43730
43731 /* Wrapper access functions for multiplexed SMBus */
43732 static DEFINE_MUTEX(amd756_lock);
43733diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43734index b19a310..d6eece0 100644
43735--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43736+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43737@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43738 /* usb layer */
43739
43740 /* Send command to device, and get response. */
43741-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43742+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43743 {
43744 int ret = 0;
43745 int actual;
43746diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43747index b170bdf..3c76427 100644
43748--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43749+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43750@@ -41,7 +41,7 @@
43751 extern struct i2c_adapter *nforce2_smbus;
43752
43753 static struct i2c_adapter *s4985_adapter;
43754-static struct i2c_algorithm *s4985_algo;
43755+static i2c_algorithm_no_const *s4985_algo;
43756
43757 /* Wrapper access functions for multiplexed SMBus */
43758 static DEFINE_MUTEX(nforce2_lock);
43759diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43760index 80b47e8..1a6040d9 100644
43761--- a/drivers/i2c/i2c-dev.c
43762+++ b/drivers/i2c/i2c-dev.c
43763@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43764 break;
43765 }
43766
43767- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43768+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43769 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43770 if (IS_ERR(rdwr_pa[i].buf)) {
43771 res = PTR_ERR(rdwr_pa[i].buf);
43772diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43773index 0b510ba..4fbb5085 100644
43774--- a/drivers/ide/ide-cd.c
43775+++ b/drivers/ide/ide-cd.c
43776@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43777 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43778 if ((unsigned long)buf & alignment
43779 || blk_rq_bytes(rq) & q->dma_pad_mask
43780- || object_is_on_stack(buf))
43781+ || object_starts_on_stack(buf))
43782 drive->dma = 0;
43783 }
43784 }
43785diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43786index af3e76d..96dfe5e 100644
43787--- a/drivers/iio/industrialio-core.c
43788+++ b/drivers/iio/industrialio-core.c
43789@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43790 }
43791
43792 static
43793-int __iio_device_attr_init(struct device_attribute *dev_attr,
43794+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43795 const char *postfix,
43796 struct iio_chan_spec const *chan,
43797 ssize_t (*readfunc)(struct device *dev,
43798diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43799index e28a494..f7c2671 100644
43800--- a/drivers/infiniband/core/cm.c
43801+++ b/drivers/infiniband/core/cm.c
43802@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43803
43804 struct cm_counter_group {
43805 struct kobject obj;
43806- atomic_long_t counter[CM_ATTR_COUNT];
43807+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43808 };
43809
43810 struct cm_counter_attribute {
43811@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43812 struct ib_mad_send_buf *msg = NULL;
43813 int ret;
43814
43815- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43816+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43817 counter[CM_REQ_COUNTER]);
43818
43819 /* Quick state check to discard duplicate REQs. */
43820@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43821 if (!cm_id_priv)
43822 return;
43823
43824- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43825+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43826 counter[CM_REP_COUNTER]);
43827 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43828 if (ret)
43829@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43830 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43831 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43832 spin_unlock_irq(&cm_id_priv->lock);
43833- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43834+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43835 counter[CM_RTU_COUNTER]);
43836 goto out;
43837 }
43838@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43839 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43840 dreq_msg->local_comm_id);
43841 if (!cm_id_priv) {
43842- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43843+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43844 counter[CM_DREQ_COUNTER]);
43845 cm_issue_drep(work->port, work->mad_recv_wc);
43846 return -EINVAL;
43847@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43848 case IB_CM_MRA_REP_RCVD:
43849 break;
43850 case IB_CM_TIMEWAIT:
43851- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43852+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43853 counter[CM_DREQ_COUNTER]);
43854 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43855 goto unlock;
43856@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43857 cm_free_msg(msg);
43858 goto deref;
43859 case IB_CM_DREQ_RCVD:
43860- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43861+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43862 counter[CM_DREQ_COUNTER]);
43863 goto unlock;
43864 default:
43865@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43866 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43867 cm_id_priv->msg, timeout)) {
43868 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43869- atomic_long_inc(&work->port->
43870+ atomic_long_inc_unchecked(&work->port->
43871 counter_group[CM_RECV_DUPLICATES].
43872 counter[CM_MRA_COUNTER]);
43873 goto out;
43874@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43875 break;
43876 case IB_CM_MRA_REQ_RCVD:
43877 case IB_CM_MRA_REP_RCVD:
43878- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43879+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43880 counter[CM_MRA_COUNTER]);
43881 /* fall through */
43882 default:
43883@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43884 case IB_CM_LAP_IDLE:
43885 break;
43886 case IB_CM_MRA_LAP_SENT:
43887- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43888+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43889 counter[CM_LAP_COUNTER]);
43890 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43891 goto unlock;
43892@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43893 cm_free_msg(msg);
43894 goto deref;
43895 case IB_CM_LAP_RCVD:
43896- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43897+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43898 counter[CM_LAP_COUNTER]);
43899 goto unlock;
43900 default:
43901@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43902 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43903 if (cur_cm_id_priv) {
43904 spin_unlock_irq(&cm.lock);
43905- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43906+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43907 counter[CM_SIDR_REQ_COUNTER]);
43908 goto out; /* Duplicate message. */
43909 }
43910@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43911 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43912 msg->retries = 1;
43913
43914- atomic_long_add(1 + msg->retries,
43915+ atomic_long_add_unchecked(1 + msg->retries,
43916 &port->counter_group[CM_XMIT].counter[attr_index]);
43917 if (msg->retries)
43918- atomic_long_add(msg->retries,
43919+ atomic_long_add_unchecked(msg->retries,
43920 &port->counter_group[CM_XMIT_RETRIES].
43921 counter[attr_index]);
43922
43923@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43924 }
43925
43926 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43927- atomic_long_inc(&port->counter_group[CM_RECV].
43928+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43929 counter[attr_id - CM_ATTR_ID_OFFSET]);
43930
43931 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43932@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43933 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43934
43935 return sprintf(buf, "%ld\n",
43936- atomic_long_read(&group->counter[cm_attr->index]));
43937+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43938 }
43939
43940 static const struct sysfs_ops cm_counter_ops = {
43941diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43942index 9f5ad7c..588cd84 100644
43943--- a/drivers/infiniband/core/fmr_pool.c
43944+++ b/drivers/infiniband/core/fmr_pool.c
43945@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43946
43947 struct task_struct *thread;
43948
43949- atomic_t req_ser;
43950- atomic_t flush_ser;
43951+ atomic_unchecked_t req_ser;
43952+ atomic_unchecked_t flush_ser;
43953
43954 wait_queue_head_t force_wait;
43955 };
43956@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43957 struct ib_fmr_pool *pool = pool_ptr;
43958
43959 do {
43960- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43961+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43962 ib_fmr_batch_release(pool);
43963
43964- atomic_inc(&pool->flush_ser);
43965+ atomic_inc_unchecked(&pool->flush_ser);
43966 wake_up_interruptible(&pool->force_wait);
43967
43968 if (pool->flush_function)
43969@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43970 }
43971
43972 set_current_state(TASK_INTERRUPTIBLE);
43973- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43974+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43975 !kthread_should_stop())
43976 schedule();
43977 __set_current_state(TASK_RUNNING);
43978@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43979 pool->dirty_watermark = params->dirty_watermark;
43980 pool->dirty_len = 0;
43981 spin_lock_init(&pool->pool_lock);
43982- atomic_set(&pool->req_ser, 0);
43983- atomic_set(&pool->flush_ser, 0);
43984+ atomic_set_unchecked(&pool->req_ser, 0);
43985+ atomic_set_unchecked(&pool->flush_ser, 0);
43986 init_waitqueue_head(&pool->force_wait);
43987
43988 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43989@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43990 }
43991 spin_unlock_irq(&pool->pool_lock);
43992
43993- serial = atomic_inc_return(&pool->req_ser);
43994+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43995 wake_up_process(pool->thread);
43996
43997 if (wait_event_interruptible(pool->force_wait,
43998- atomic_read(&pool->flush_ser) - serial >= 0))
43999+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
44000 return -EINTR;
44001
44002 return 0;
44003@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
44004 } else {
44005 list_add_tail(&fmr->list, &pool->dirty_list);
44006 if (++pool->dirty_len >= pool->dirty_watermark) {
44007- atomic_inc(&pool->req_ser);
44008+ atomic_inc_unchecked(&pool->req_ser);
44009 wake_up_process(pool->thread);
44010 }
44011 }
44012diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
44013index ec7a298..8742e59 100644
44014--- a/drivers/infiniband/hw/cxgb4/mem.c
44015+++ b/drivers/infiniband/hw/cxgb4/mem.c
44016@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44017 int err;
44018 struct fw_ri_tpte tpt;
44019 u32 stag_idx;
44020- static atomic_t key;
44021+ static atomic_unchecked_t key;
44022
44023 if (c4iw_fatal_error(rdev))
44024 return -EIO;
44025@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
44026 if (rdev->stats.stag.cur > rdev->stats.stag.max)
44027 rdev->stats.stag.max = rdev->stats.stag.cur;
44028 mutex_unlock(&rdev->stats.lock);
44029- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
44030+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
44031 }
44032 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
44033 __func__, stag_state, type, pdid, stag_idx);
44034diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
44035index 79b3dbc..96e5fcc 100644
44036--- a/drivers/infiniband/hw/ipath/ipath_rc.c
44037+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
44038@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44039 struct ib_atomic_eth *ateth;
44040 struct ipath_ack_entry *e;
44041 u64 vaddr;
44042- atomic64_t *maddr;
44043+ atomic64_unchecked_t *maddr;
44044 u64 sdata;
44045 u32 rkey;
44046 u8 next;
44047@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
44048 IB_ACCESS_REMOTE_ATOMIC)))
44049 goto nack_acc_unlck;
44050 /* Perform atomic OP and save result. */
44051- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44052+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44053 sdata = be64_to_cpu(ateth->swap_data);
44054 e = &qp->s_ack_queue[qp->r_head_ack_queue];
44055 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
44056- (u64) atomic64_add_return(sdata, maddr) - sdata :
44057+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44058 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44059 be64_to_cpu(ateth->compare_data),
44060 sdata);
44061diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
44062index 1f95bba..9530f87 100644
44063--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
44064+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
44065@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
44066 unsigned long flags;
44067 struct ib_wc wc;
44068 u64 sdata;
44069- atomic64_t *maddr;
44070+ atomic64_unchecked_t *maddr;
44071 enum ib_wc_status send_status;
44072
44073 /*
44074@@ -382,11 +382,11 @@ again:
44075 IB_ACCESS_REMOTE_ATOMIC)))
44076 goto acc_err;
44077 /* Perform atomic OP and save result. */
44078- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
44079+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
44080 sdata = wqe->wr.wr.atomic.compare_add;
44081 *(u64 *) sqp->s_sge.sge.vaddr =
44082 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
44083- (u64) atomic64_add_return(sdata, maddr) - sdata :
44084+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
44085 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
44086 sdata, wqe->wr.wr.atomic.swap);
44087 goto send_comp;
44088diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
44089index 82a7dd8..8fb6ba6 100644
44090--- a/drivers/infiniband/hw/mlx4/mad.c
44091+++ b/drivers/infiniband/hw/mlx4/mad.c
44092@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
44093
44094 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
44095 {
44096- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
44097+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
44098 cpu_to_be64(0xff00000000000000LL);
44099 }
44100
44101diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
44102index ed327e6..ca1739e0 100644
44103--- a/drivers/infiniband/hw/mlx4/mcg.c
44104+++ b/drivers/infiniband/hw/mlx4/mcg.c
44105@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
44106 {
44107 char name[20];
44108
44109- atomic_set(&ctx->tid, 0);
44110+ atomic_set_unchecked(&ctx->tid, 0);
44111 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
44112 ctx->mcg_wq = create_singlethread_workqueue(name);
44113 if (!ctx->mcg_wq)
44114diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44115index 6eb743f..a7b0f6d 100644
44116--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
44117+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
44118@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
44119 struct list_head mcg_mgid0_list;
44120 struct workqueue_struct *mcg_wq;
44121 struct mlx4_ib_demux_pv_ctx **tun;
44122- atomic_t tid;
44123+ atomic_unchecked_t tid;
44124 int flushing; /* flushing the work queue */
44125 };
44126
44127diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
44128index 9d3e5c1..6f166df 100644
44129--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
44130+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
44131@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
44132 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
44133 }
44134
44135-int mthca_QUERY_FW(struct mthca_dev *dev)
44136+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
44137 {
44138 struct mthca_mailbox *mailbox;
44139 u32 *outbox;
44140@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44141 CMD_TIME_CLASS_B);
44142 }
44143
44144-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44145+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44146 int num_mtt)
44147 {
44148 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
44149@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
44150 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
44151 }
44152
44153-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44154+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
44155 int eq_num)
44156 {
44157 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
44158@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
44159 CMD_TIME_CLASS_B);
44160 }
44161
44162-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44163+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
44164 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
44165 void *in_mad, void *response_mad)
44166 {
44167diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
44168index ded76c1..0cf0a08 100644
44169--- a/drivers/infiniband/hw/mthca/mthca_main.c
44170+++ b/drivers/infiniband/hw/mthca/mthca_main.c
44171@@ -692,7 +692,7 @@ err_close:
44172 return err;
44173 }
44174
44175-static int mthca_setup_hca(struct mthca_dev *dev)
44176+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
44177 {
44178 int err;
44179
44180diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
44181index ed9a989..6aa5dc2 100644
44182--- a/drivers/infiniband/hw/mthca/mthca_mr.c
44183+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
44184@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
44185 * through the bitmaps)
44186 */
44187
44188-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44189+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
44190 {
44191 int o;
44192 int m;
44193@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
44194 return key;
44195 }
44196
44197-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44198+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
44199 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
44200 {
44201 struct mthca_mailbox *mailbox;
44202@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
44203 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
44204 }
44205
44206-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44207+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
44208 u64 *buffer_list, int buffer_size_shift,
44209 int list_len, u64 iova, u64 total_size,
44210 u32 access, struct mthca_mr *mr)
44211diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
44212index 415f8e1..e34214e 100644
44213--- a/drivers/infiniband/hw/mthca/mthca_provider.c
44214+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
44215@@ -764,7 +764,7 @@ unlock:
44216 return 0;
44217 }
44218
44219-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44220+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
44221 {
44222 struct mthca_dev *dev = to_mdev(ibcq->device);
44223 struct mthca_cq *cq = to_mcq(ibcq);
44224diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
44225index 3b2a6dc..bce26ff 100644
44226--- a/drivers/infiniband/hw/nes/nes.c
44227+++ b/drivers/infiniband/hw/nes/nes.c
44228@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
44229 LIST_HEAD(nes_adapter_list);
44230 static LIST_HEAD(nes_dev_list);
44231
44232-atomic_t qps_destroyed;
44233+atomic_unchecked_t qps_destroyed;
44234
44235 static unsigned int ee_flsh_adapter;
44236 static unsigned int sysfs_nonidx_addr;
44237@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
44238 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
44239 struct nes_adapter *nesadapter = nesdev->nesadapter;
44240
44241- atomic_inc(&qps_destroyed);
44242+ atomic_inc_unchecked(&qps_destroyed);
44243
44244 /* Free the control structures */
44245
44246diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
44247index bd9d132..70d84f4 100644
44248--- a/drivers/infiniband/hw/nes/nes.h
44249+++ b/drivers/infiniband/hw/nes/nes.h
44250@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
44251 extern unsigned int wqm_quanta;
44252 extern struct list_head nes_adapter_list;
44253
44254-extern atomic_t cm_connects;
44255-extern atomic_t cm_accepts;
44256-extern atomic_t cm_disconnects;
44257-extern atomic_t cm_closes;
44258-extern atomic_t cm_connecteds;
44259-extern atomic_t cm_connect_reqs;
44260-extern atomic_t cm_rejects;
44261-extern atomic_t mod_qp_timouts;
44262-extern atomic_t qps_created;
44263-extern atomic_t qps_destroyed;
44264-extern atomic_t sw_qps_destroyed;
44265+extern atomic_unchecked_t cm_connects;
44266+extern atomic_unchecked_t cm_accepts;
44267+extern atomic_unchecked_t cm_disconnects;
44268+extern atomic_unchecked_t cm_closes;
44269+extern atomic_unchecked_t cm_connecteds;
44270+extern atomic_unchecked_t cm_connect_reqs;
44271+extern atomic_unchecked_t cm_rejects;
44272+extern atomic_unchecked_t mod_qp_timouts;
44273+extern atomic_unchecked_t qps_created;
44274+extern atomic_unchecked_t qps_destroyed;
44275+extern atomic_unchecked_t sw_qps_destroyed;
44276 extern u32 mh_detected;
44277 extern u32 mh_pauses_sent;
44278 extern u32 cm_packets_sent;
44279@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
44280 extern u32 cm_packets_received;
44281 extern u32 cm_packets_dropped;
44282 extern u32 cm_packets_retrans;
44283-extern atomic_t cm_listens_created;
44284-extern atomic_t cm_listens_destroyed;
44285+extern atomic_unchecked_t cm_listens_created;
44286+extern atomic_unchecked_t cm_listens_destroyed;
44287 extern u32 cm_backlog_drops;
44288-extern atomic_t cm_loopbacks;
44289-extern atomic_t cm_nodes_created;
44290-extern atomic_t cm_nodes_destroyed;
44291-extern atomic_t cm_accel_dropped_pkts;
44292-extern atomic_t cm_resets_recvd;
44293-extern atomic_t pau_qps_created;
44294-extern atomic_t pau_qps_destroyed;
44295+extern atomic_unchecked_t cm_loopbacks;
44296+extern atomic_unchecked_t cm_nodes_created;
44297+extern atomic_unchecked_t cm_nodes_destroyed;
44298+extern atomic_unchecked_t cm_accel_dropped_pkts;
44299+extern atomic_unchecked_t cm_resets_recvd;
44300+extern atomic_unchecked_t pau_qps_created;
44301+extern atomic_unchecked_t pau_qps_destroyed;
44302
44303 extern u32 int_mod_timer_init;
44304 extern u32 int_mod_cq_depth_256;
44305diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
44306index 6f09a72..cf4399d 100644
44307--- a/drivers/infiniband/hw/nes/nes_cm.c
44308+++ b/drivers/infiniband/hw/nes/nes_cm.c
44309@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
44310 u32 cm_packets_retrans;
44311 u32 cm_packets_created;
44312 u32 cm_packets_received;
44313-atomic_t cm_listens_created;
44314-atomic_t cm_listens_destroyed;
44315+atomic_unchecked_t cm_listens_created;
44316+atomic_unchecked_t cm_listens_destroyed;
44317 u32 cm_backlog_drops;
44318-atomic_t cm_loopbacks;
44319-atomic_t cm_nodes_created;
44320-atomic_t cm_nodes_destroyed;
44321-atomic_t cm_accel_dropped_pkts;
44322-atomic_t cm_resets_recvd;
44323+atomic_unchecked_t cm_loopbacks;
44324+atomic_unchecked_t cm_nodes_created;
44325+atomic_unchecked_t cm_nodes_destroyed;
44326+atomic_unchecked_t cm_accel_dropped_pkts;
44327+atomic_unchecked_t cm_resets_recvd;
44328
44329 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
44330 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
44331@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
44332 /* instance of function pointers for client API */
44333 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
44334 static struct nes_cm_ops nes_cm_api = {
44335- mini_cm_accelerated,
44336- mini_cm_listen,
44337- mini_cm_del_listen,
44338- mini_cm_connect,
44339- mini_cm_close,
44340- mini_cm_accept,
44341- mini_cm_reject,
44342- mini_cm_recv_pkt,
44343- mini_cm_dealloc_core,
44344- mini_cm_get,
44345- mini_cm_set
44346+ .accelerated = mini_cm_accelerated,
44347+ .listen = mini_cm_listen,
44348+ .stop_listener = mini_cm_del_listen,
44349+ .connect = mini_cm_connect,
44350+ .close = mini_cm_close,
44351+ .accept = mini_cm_accept,
44352+ .reject = mini_cm_reject,
44353+ .recv_pkt = mini_cm_recv_pkt,
44354+ .destroy_cm_core = mini_cm_dealloc_core,
44355+ .get = mini_cm_get,
44356+ .set = mini_cm_set
44357 };
44358
44359 static struct nes_cm_core *g_cm_core;
44360
44361-atomic_t cm_connects;
44362-atomic_t cm_accepts;
44363-atomic_t cm_disconnects;
44364-atomic_t cm_closes;
44365-atomic_t cm_connecteds;
44366-atomic_t cm_connect_reqs;
44367-atomic_t cm_rejects;
44368+atomic_unchecked_t cm_connects;
44369+atomic_unchecked_t cm_accepts;
44370+atomic_unchecked_t cm_disconnects;
44371+atomic_unchecked_t cm_closes;
44372+atomic_unchecked_t cm_connecteds;
44373+atomic_unchecked_t cm_connect_reqs;
44374+atomic_unchecked_t cm_rejects;
44375
44376 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
44377 {
44378@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
44379 kfree(listener);
44380 listener = NULL;
44381 ret = 0;
44382- atomic_inc(&cm_listens_destroyed);
44383+ atomic_inc_unchecked(&cm_listens_destroyed);
44384 } else {
44385 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
44386 }
44387@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
44388 cm_node->rem_mac);
44389
44390 add_hte_node(cm_core, cm_node);
44391- atomic_inc(&cm_nodes_created);
44392+ atomic_inc_unchecked(&cm_nodes_created);
44393
44394 return cm_node;
44395 }
44396@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
44397 }
44398
44399 atomic_dec(&cm_core->node_cnt);
44400- atomic_inc(&cm_nodes_destroyed);
44401+ atomic_inc_unchecked(&cm_nodes_destroyed);
44402 nesqp = cm_node->nesqp;
44403 if (nesqp) {
44404 nesqp->cm_node = NULL;
44405@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
44406
44407 static void drop_packet(struct sk_buff *skb)
44408 {
44409- atomic_inc(&cm_accel_dropped_pkts);
44410+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44411 dev_kfree_skb_any(skb);
44412 }
44413
44414@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
44415 {
44416
44417 int reset = 0; /* whether to send reset in case of err.. */
44418- atomic_inc(&cm_resets_recvd);
44419+ atomic_inc_unchecked(&cm_resets_recvd);
44420 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
44421 " refcnt=%d\n", cm_node, cm_node->state,
44422 atomic_read(&cm_node->ref_count));
44423@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
44424 rem_ref_cm_node(cm_node->cm_core, cm_node);
44425 return NULL;
44426 }
44427- atomic_inc(&cm_loopbacks);
44428+ atomic_inc_unchecked(&cm_loopbacks);
44429 loopbackremotenode->loopbackpartner = cm_node;
44430 loopbackremotenode->tcp_cntxt.rcv_wscale =
44431 NES_CM_DEFAULT_RCV_WND_SCALE;
44432@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
44433 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
44434 else {
44435 rem_ref_cm_node(cm_core, cm_node);
44436- atomic_inc(&cm_accel_dropped_pkts);
44437+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
44438 dev_kfree_skb_any(skb);
44439 }
44440 break;
44441@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44442
44443 if ((cm_id) && (cm_id->event_handler)) {
44444 if (issue_disconn) {
44445- atomic_inc(&cm_disconnects);
44446+ atomic_inc_unchecked(&cm_disconnects);
44447 cm_event.event = IW_CM_EVENT_DISCONNECT;
44448 cm_event.status = disconn_status;
44449 cm_event.local_addr = cm_id->local_addr;
44450@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
44451 }
44452
44453 if (issue_close) {
44454- atomic_inc(&cm_closes);
44455+ atomic_inc_unchecked(&cm_closes);
44456 nes_disconnect(nesqp, 1);
44457
44458 cm_id->provider_data = nesqp;
44459@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44460
44461 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
44462 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
44463- atomic_inc(&cm_accepts);
44464+ atomic_inc_unchecked(&cm_accepts);
44465
44466 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
44467 netdev_refcnt_read(nesvnic->netdev));
44468@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
44469 struct nes_cm_core *cm_core;
44470 u8 *start_buff;
44471
44472- atomic_inc(&cm_rejects);
44473+ atomic_inc_unchecked(&cm_rejects);
44474 cm_node = (struct nes_cm_node *)cm_id->provider_data;
44475 loopback = cm_node->loopbackpartner;
44476 cm_core = cm_node->cm_core;
44477@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
44478 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
44479 ntohs(laddr->sin_port));
44480
44481- atomic_inc(&cm_connects);
44482+ atomic_inc_unchecked(&cm_connects);
44483 nesqp->active_conn = 1;
44484
44485 /* cache the cm_id in the qp */
44486@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
44487 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
44488 return err;
44489 }
44490- atomic_inc(&cm_listens_created);
44491+ atomic_inc_unchecked(&cm_listens_created);
44492 }
44493
44494 cm_id->add_ref(cm_id);
44495@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
44496
44497 if (nesqp->destroyed)
44498 return;
44499- atomic_inc(&cm_connecteds);
44500+ atomic_inc_unchecked(&cm_connecteds);
44501 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
44502 " local port 0x%04X. jiffies = %lu.\n",
44503 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
44504@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
44505
44506 cm_id->add_ref(cm_id);
44507 ret = cm_id->event_handler(cm_id, &cm_event);
44508- atomic_inc(&cm_closes);
44509+ atomic_inc_unchecked(&cm_closes);
44510 cm_event.event = IW_CM_EVENT_CLOSE;
44511 cm_event.status = 0;
44512 cm_event.provider_data = cm_id->provider_data;
44513@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
44514 return;
44515 cm_id = cm_node->cm_id;
44516
44517- atomic_inc(&cm_connect_reqs);
44518+ atomic_inc_unchecked(&cm_connect_reqs);
44519 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44520 cm_node, cm_id, jiffies);
44521
44522@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
44523 return;
44524 cm_id = cm_node->cm_id;
44525
44526- atomic_inc(&cm_connect_reqs);
44527+ atomic_inc_unchecked(&cm_connect_reqs);
44528 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
44529 cm_node, cm_id, jiffies);
44530
44531diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
44532index 4166452..fc952c3 100644
44533--- a/drivers/infiniband/hw/nes/nes_mgt.c
44534+++ b/drivers/infiniband/hw/nes/nes_mgt.c
44535@@ -40,8 +40,8 @@
44536 #include "nes.h"
44537 #include "nes_mgt.h"
44538
44539-atomic_t pau_qps_created;
44540-atomic_t pau_qps_destroyed;
44541+atomic_unchecked_t pau_qps_created;
44542+atomic_unchecked_t pau_qps_destroyed;
44543
44544 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
44545 {
44546@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
44547 {
44548 struct sk_buff *skb;
44549 unsigned long flags;
44550- atomic_inc(&pau_qps_destroyed);
44551+ atomic_inc_unchecked(&pau_qps_destroyed);
44552
44553 /* Free packets that have not yet been forwarded */
44554 /* Lock is acquired by skb_dequeue when removing the skb */
44555@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
44556 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
44557 skb_queue_head_init(&nesqp->pau_list);
44558 spin_lock_init(&nesqp->pau_lock);
44559- atomic_inc(&pau_qps_created);
44560+ atomic_inc_unchecked(&pau_qps_created);
44561 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
44562 }
44563
44564diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
44565index 49eb511..a774366 100644
44566--- a/drivers/infiniband/hw/nes/nes_nic.c
44567+++ b/drivers/infiniband/hw/nes/nes_nic.c
44568@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
44569 target_stat_values[++index] = mh_detected;
44570 target_stat_values[++index] = mh_pauses_sent;
44571 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
44572- target_stat_values[++index] = atomic_read(&cm_connects);
44573- target_stat_values[++index] = atomic_read(&cm_accepts);
44574- target_stat_values[++index] = atomic_read(&cm_disconnects);
44575- target_stat_values[++index] = atomic_read(&cm_connecteds);
44576- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
44577- target_stat_values[++index] = atomic_read(&cm_rejects);
44578- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
44579- target_stat_values[++index] = atomic_read(&qps_created);
44580- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
44581- target_stat_values[++index] = atomic_read(&qps_destroyed);
44582- target_stat_values[++index] = atomic_read(&cm_closes);
44583+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
44584+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
44585+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
44586+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
44587+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
44588+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
44589+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
44590+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
44591+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
44592+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
44593+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
44594 target_stat_values[++index] = cm_packets_sent;
44595 target_stat_values[++index] = cm_packets_bounced;
44596 target_stat_values[++index] = cm_packets_created;
44597 target_stat_values[++index] = cm_packets_received;
44598 target_stat_values[++index] = cm_packets_dropped;
44599 target_stat_values[++index] = cm_packets_retrans;
44600- target_stat_values[++index] = atomic_read(&cm_listens_created);
44601- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
44602+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
44603+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
44604 target_stat_values[++index] = cm_backlog_drops;
44605- target_stat_values[++index] = atomic_read(&cm_loopbacks);
44606- target_stat_values[++index] = atomic_read(&cm_nodes_created);
44607- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
44608- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
44609- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
44610+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
44611+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
44612+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
44613+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
44614+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
44615 target_stat_values[++index] = nesadapter->free_4kpbl;
44616 target_stat_values[++index] = nesadapter->free_256pbl;
44617 target_stat_values[++index] = int_mod_timer_init;
44618 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44619 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44620 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44621- target_stat_values[++index] = atomic_read(&pau_qps_created);
44622- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44623+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44624+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44625 }
44626
44627 /**
44628diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44629index fef067c..6a25ccd 100644
44630--- a/drivers/infiniband/hw/nes/nes_verbs.c
44631+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44632@@ -46,9 +46,9 @@
44633
44634 #include <rdma/ib_umem.h>
44635
44636-atomic_t mod_qp_timouts;
44637-atomic_t qps_created;
44638-atomic_t sw_qps_destroyed;
44639+atomic_unchecked_t mod_qp_timouts;
44640+atomic_unchecked_t qps_created;
44641+atomic_unchecked_t sw_qps_destroyed;
44642
44643 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44644
44645@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44646 if (init_attr->create_flags)
44647 return ERR_PTR(-EINVAL);
44648
44649- atomic_inc(&qps_created);
44650+ atomic_inc_unchecked(&qps_created);
44651 switch (init_attr->qp_type) {
44652 case IB_QPT_RC:
44653 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44654@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44655 struct iw_cm_event cm_event;
44656 int ret = 0;
44657
44658- atomic_inc(&sw_qps_destroyed);
44659+ atomic_inc_unchecked(&sw_qps_destroyed);
44660 nesqp->destroyed = 1;
44661
44662 /* Blow away the connection if it exists. */
44663diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44664index c00ae09..04e91be 100644
44665--- a/drivers/infiniband/hw/qib/qib.h
44666+++ b/drivers/infiniband/hw/qib/qib.h
44667@@ -52,6 +52,7 @@
44668 #include <linux/kref.h>
44669 #include <linux/sched.h>
44670 #include <linux/kthread.h>
44671+#include <linux/slab.h>
44672
44673 #include "qib_common.h"
44674 #include "qib_verbs.h"
44675diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44676index 24c41ba..102d71f 100644
44677--- a/drivers/input/gameport/gameport.c
44678+++ b/drivers/input/gameport/gameport.c
44679@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44680 */
44681 static void gameport_init_port(struct gameport *gameport)
44682 {
44683- static atomic_t gameport_no = ATOMIC_INIT(0);
44684+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
44685
44686 __module_get(THIS_MODULE);
44687
44688 mutex_init(&gameport->drv_mutex);
44689 device_initialize(&gameport->dev);
44690 dev_set_name(&gameport->dev, "gameport%lu",
44691- (unsigned long)atomic_inc_return(&gameport_no) - 1);
44692+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
44693 gameport->dev.bus = &gameport_bus;
44694 gameport->dev.release = gameport_release_port;
44695 if (gameport->parent)
44696diff --git a/drivers/input/input.c b/drivers/input/input.c
44697index 29ca0bb..f4bc2e3 100644
44698--- a/drivers/input/input.c
44699+++ b/drivers/input/input.c
44700@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
44701 */
44702 struct input_dev *input_allocate_device(void)
44703 {
44704- static atomic_t input_no = ATOMIC_INIT(0);
44705+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
44706 struct input_dev *dev;
44707
44708 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44709@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
44710 INIT_LIST_HEAD(&dev->node);
44711
44712 dev_set_name(&dev->dev, "input%ld",
44713- (unsigned long) atomic_inc_return(&input_no) - 1);
44714+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
44715
44716 __module_get(THIS_MODULE);
44717 }
44718diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44719index 4a95b22..874c182 100644
44720--- a/drivers/input/joystick/sidewinder.c
44721+++ b/drivers/input/joystick/sidewinder.c
44722@@ -30,6 +30,7 @@
44723 #include <linux/kernel.h>
44724 #include <linux/module.h>
44725 #include <linux/slab.h>
44726+#include <linux/sched.h>
44727 #include <linux/input.h>
44728 #include <linux/gameport.h>
44729 #include <linux/jiffies.h>
44730diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44731index 177602c..ec78499 100644
44732--- a/drivers/input/joystick/xpad.c
44733+++ b/drivers/input/joystick/xpad.c
44734@@ -850,7 +850,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44735
44736 static int xpad_led_probe(struct usb_xpad *xpad)
44737 {
44738- static atomic_t led_seq = ATOMIC_INIT(0);
44739+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
44740 long led_no;
44741 struct xpad_led *led;
44742 struct led_classdev *led_cdev;
44743@@ -863,7 +863,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44744 if (!led)
44745 return -ENOMEM;
44746
44747- led_no = (long)atomic_inc_return(&led_seq) - 1;
44748+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44749
44750 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44751 led->xpad = xpad;
44752diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44753index 719410f..1896169 100644
44754--- a/drivers/input/misc/ims-pcu.c
44755+++ b/drivers/input/misc/ims-pcu.c
44756@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44757
44758 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44759 {
44760- static atomic_t device_no = ATOMIC_INIT(0);
44761+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44762
44763 const struct ims_pcu_device_info *info;
44764 int error;
44765@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44766 }
44767
44768 /* Device appears to be operable, complete initialization */
44769- pcu->device_no = atomic_inc_return(&device_no) - 1;
44770+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44771
44772 /*
44773 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44774diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44775index 2f0b39d..7370f13 100644
44776--- a/drivers/input/mouse/psmouse.h
44777+++ b/drivers/input/mouse/psmouse.h
44778@@ -116,7 +116,7 @@ struct psmouse_attribute {
44779 ssize_t (*set)(struct psmouse *psmouse, void *data,
44780 const char *buf, size_t count);
44781 bool protect;
44782-};
44783+} __do_const;
44784 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44785
44786 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44787diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44788index b604564..3f14ae4 100644
44789--- a/drivers/input/mousedev.c
44790+++ b/drivers/input/mousedev.c
44791@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44792
44793 spin_unlock_irq(&client->packet_lock);
44794
44795- if (copy_to_user(buffer, data, count))
44796+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44797 return -EFAULT;
44798
44799 return count;
44800diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44801index b29134d..394deb0 100644
44802--- a/drivers/input/serio/serio.c
44803+++ b/drivers/input/serio/serio.c
44804@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44805 */
44806 static void serio_init_port(struct serio *serio)
44807 {
44808- static atomic_t serio_no = ATOMIC_INIT(0);
44809+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44810
44811 __module_get(THIS_MODULE);
44812
44813@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44814 mutex_init(&serio->drv_mutex);
44815 device_initialize(&serio->dev);
44816 dev_set_name(&serio->dev, "serio%ld",
44817- (long)atomic_inc_return(&serio_no) - 1);
44818+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44819 serio->dev.bus = &serio_bus;
44820 serio->dev.release = serio_release_port;
44821 serio->dev.groups = serio_device_attr_groups;
44822diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44823index c9a02fe..0debc75 100644
44824--- a/drivers/input/serio/serio_raw.c
44825+++ b/drivers/input/serio/serio_raw.c
44826@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44827
44828 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44829 {
44830- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44831+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44832 struct serio_raw *serio_raw;
44833 int err;
44834
44835@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44836 }
44837
44838 snprintf(serio_raw->name, sizeof(serio_raw->name),
44839- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44840+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44841 kref_init(&serio_raw->kref);
44842 INIT_LIST_HEAD(&serio_raw->client_list);
44843 init_waitqueue_head(&serio_raw->wait);
44844diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44845index a83cc2a..64462e6 100644
44846--- a/drivers/iommu/arm-smmu.c
44847+++ b/drivers/iommu/arm-smmu.c
44848@@ -921,7 +921,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44849 cfg->irptndx = cfg->cbndx;
44850 }
44851
44852- ACCESS_ONCE(smmu_domain->smmu) = smmu;
44853+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
44854 arm_smmu_init_context_bank(smmu_domain);
44855 spin_unlock_irqrestore(&smmu_domain->lock, flags);
44856
44857diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44858index 33c4395..e06447e 100644
44859--- a/drivers/iommu/irq_remapping.c
44860+++ b/drivers/iommu/irq_remapping.c
44861@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44862 void panic_if_irq_remap(const char *msg)
44863 {
44864 if (irq_remapping_enabled)
44865- panic(msg);
44866+ panic("%s", msg);
44867 }
44868
44869 static void ir_ack_apic_edge(struct irq_data *data)
44870@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44871
44872 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44873 {
44874- chip->irq_print_chip = ir_print_prefix;
44875- chip->irq_ack = ir_ack_apic_edge;
44876- chip->irq_eoi = ir_ack_apic_level;
44877- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44878+ pax_open_kernel();
44879+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44880+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44881+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44882+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44883+ pax_close_kernel();
44884 }
44885
44886 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44887diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44888index dda6dbc..f9adebb 100644
44889--- a/drivers/irqchip/irq-gic.c
44890+++ b/drivers/irqchip/irq-gic.c
44891@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44892 * Supported arch specific GIC irq extension.
44893 * Default make them NULL.
44894 */
44895-struct irq_chip gic_arch_extn = {
44896+irq_chip_no_const gic_arch_extn = {
44897 .irq_eoi = NULL,
44898 .irq_mask = NULL,
44899 .irq_unmask = NULL,
44900@@ -312,7 +312,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44901 chained_irq_exit(chip, desc);
44902 }
44903
44904-static struct irq_chip gic_chip = {
44905+static irq_chip_no_const gic_chip __read_only = {
44906 .name = "GIC",
44907 .irq_mask = gic_mask_irq,
44908 .irq_unmask = gic_unmask_irq,
44909diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44910index 8777065..a4a9967 100644
44911--- a/drivers/irqchip/irq-renesas-irqc.c
44912+++ b/drivers/irqchip/irq-renesas-irqc.c
44913@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44914 struct irqc_priv *p;
44915 struct resource *io;
44916 struct resource *irq;
44917- struct irq_chip *irq_chip;
44918+ irq_chip_no_const *irq_chip;
44919 const char *name = dev_name(&pdev->dev);
44920 int ret;
44921 int k;
44922diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44923index 6a2df32..dc962f1 100644
44924--- a/drivers/isdn/capi/capi.c
44925+++ b/drivers/isdn/capi/capi.c
44926@@ -81,8 +81,8 @@ struct capiminor {
44927
44928 struct capi20_appl *ap;
44929 u32 ncci;
44930- atomic_t datahandle;
44931- atomic_t msgid;
44932+ atomic_unchecked_t datahandle;
44933+ atomic_unchecked_t msgid;
44934
44935 struct tty_port port;
44936 int ttyinstop;
44937@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44938 capimsg_setu16(s, 2, mp->ap->applid);
44939 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44940 capimsg_setu8 (s, 5, CAPI_RESP);
44941- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44942+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44943 capimsg_setu32(s, 8, mp->ncci);
44944 capimsg_setu16(s, 12, datahandle);
44945 }
44946@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44947 mp->outbytes -= len;
44948 spin_unlock_bh(&mp->outlock);
44949
44950- datahandle = atomic_inc_return(&mp->datahandle);
44951+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44952 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44953 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44954 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44955 capimsg_setu16(skb->data, 2, mp->ap->applid);
44956 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44957 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44958- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44959+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44960 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44961 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44962 capimsg_setu16(skb->data, 16, len); /* Data length */
44963diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44964index b7ae0a0..04590fa 100644
44965--- a/drivers/isdn/gigaset/bas-gigaset.c
44966+++ b/drivers/isdn/gigaset/bas-gigaset.c
44967@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44968
44969
44970 static const struct gigaset_ops gigops = {
44971- gigaset_write_cmd,
44972- gigaset_write_room,
44973- gigaset_chars_in_buffer,
44974- gigaset_brkchars,
44975- gigaset_init_bchannel,
44976- gigaset_close_bchannel,
44977- gigaset_initbcshw,
44978- gigaset_freebcshw,
44979- gigaset_reinitbcshw,
44980- gigaset_initcshw,
44981- gigaset_freecshw,
44982- gigaset_set_modem_ctrl,
44983- gigaset_baud_rate,
44984- gigaset_set_line_ctrl,
44985- gigaset_isoc_send_skb,
44986- gigaset_isoc_input,
44987+ .write_cmd = gigaset_write_cmd,
44988+ .write_room = gigaset_write_room,
44989+ .chars_in_buffer = gigaset_chars_in_buffer,
44990+ .brkchars = gigaset_brkchars,
44991+ .init_bchannel = gigaset_init_bchannel,
44992+ .close_bchannel = gigaset_close_bchannel,
44993+ .initbcshw = gigaset_initbcshw,
44994+ .freebcshw = gigaset_freebcshw,
44995+ .reinitbcshw = gigaset_reinitbcshw,
44996+ .initcshw = gigaset_initcshw,
44997+ .freecshw = gigaset_freecshw,
44998+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44999+ .baud_rate = gigaset_baud_rate,
45000+ .set_line_ctrl = gigaset_set_line_ctrl,
45001+ .send_skb = gigaset_isoc_send_skb,
45002+ .handle_input = gigaset_isoc_input,
45003 };
45004
45005 /* bas_gigaset_init
45006diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
45007index 600c79b..3752bab 100644
45008--- a/drivers/isdn/gigaset/interface.c
45009+++ b/drivers/isdn/gigaset/interface.c
45010@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
45011 }
45012 tty->driver_data = cs;
45013
45014- ++cs->port.count;
45015+ atomic_inc(&cs->port.count);
45016
45017- if (cs->port.count == 1) {
45018+ if (atomic_read(&cs->port.count) == 1) {
45019 tty_port_tty_set(&cs->port, tty);
45020 cs->port.low_latency = 1;
45021 }
45022@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45023
45024 if (!cs->connected)
45025 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45026- else if (!cs->port.count)
45027+ else if (!atomic_read(&cs->port.count))
45028 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45029- else if (!--cs->port.count)
45030+ else if (!atomic_dec_return(&cs->port.count))
45031 tty_port_tty_set(&cs->port, NULL);
45032
45033 mutex_unlock(&cs->mutex);
45034diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45035index 8c91fd5..14f13ce 100644
45036--- a/drivers/isdn/gigaset/ser-gigaset.c
45037+++ b/drivers/isdn/gigaset/ser-gigaset.c
45038@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45039 }
45040
45041 static const struct gigaset_ops ops = {
45042- gigaset_write_cmd,
45043- gigaset_write_room,
45044- gigaset_chars_in_buffer,
45045- gigaset_brkchars,
45046- gigaset_init_bchannel,
45047- gigaset_close_bchannel,
45048- gigaset_initbcshw,
45049- gigaset_freebcshw,
45050- gigaset_reinitbcshw,
45051- gigaset_initcshw,
45052- gigaset_freecshw,
45053- gigaset_set_modem_ctrl,
45054- gigaset_baud_rate,
45055- gigaset_set_line_ctrl,
45056- gigaset_m10x_send_skb, /* asyncdata.c */
45057- gigaset_m10x_input, /* asyncdata.c */
45058+ .write_cmd = gigaset_write_cmd,
45059+ .write_room = gigaset_write_room,
45060+ .chars_in_buffer = gigaset_chars_in_buffer,
45061+ .brkchars = gigaset_brkchars,
45062+ .init_bchannel = gigaset_init_bchannel,
45063+ .close_bchannel = gigaset_close_bchannel,
45064+ .initbcshw = gigaset_initbcshw,
45065+ .freebcshw = gigaset_freebcshw,
45066+ .reinitbcshw = gigaset_reinitbcshw,
45067+ .initcshw = gigaset_initcshw,
45068+ .freecshw = gigaset_freecshw,
45069+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45070+ .baud_rate = gigaset_baud_rate,
45071+ .set_line_ctrl = gigaset_set_line_ctrl,
45072+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45073+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45074 };
45075
45076
45077diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45078index d0a41cb..b953e50 100644
45079--- a/drivers/isdn/gigaset/usb-gigaset.c
45080+++ b/drivers/isdn/gigaset/usb-gigaset.c
45081@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45082 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45083 memcpy(cs->hw.usb->bchars, buf, 6);
45084 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45085- 0, 0, &buf, 6, 2000);
45086+ 0, 0, buf, 6, 2000);
45087 }
45088
45089 static void gigaset_freebcshw(struct bc_state *bcs)
45090@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45091 }
45092
45093 static const struct gigaset_ops ops = {
45094- gigaset_write_cmd,
45095- gigaset_write_room,
45096- gigaset_chars_in_buffer,
45097- gigaset_brkchars,
45098- gigaset_init_bchannel,
45099- gigaset_close_bchannel,
45100- gigaset_initbcshw,
45101- gigaset_freebcshw,
45102- gigaset_reinitbcshw,
45103- gigaset_initcshw,
45104- gigaset_freecshw,
45105- gigaset_set_modem_ctrl,
45106- gigaset_baud_rate,
45107- gigaset_set_line_ctrl,
45108- gigaset_m10x_send_skb,
45109- gigaset_m10x_input,
45110+ .write_cmd = gigaset_write_cmd,
45111+ .write_room = gigaset_write_room,
45112+ .chars_in_buffer = gigaset_chars_in_buffer,
45113+ .brkchars = gigaset_brkchars,
45114+ .init_bchannel = gigaset_init_bchannel,
45115+ .close_bchannel = gigaset_close_bchannel,
45116+ .initbcshw = gigaset_initbcshw,
45117+ .freebcshw = gigaset_freebcshw,
45118+ .reinitbcshw = gigaset_reinitbcshw,
45119+ .initcshw = gigaset_initcshw,
45120+ .freecshw = gigaset_freecshw,
45121+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45122+ .baud_rate = gigaset_baud_rate,
45123+ .set_line_ctrl = gigaset_set_line_ctrl,
45124+ .send_skb = gigaset_m10x_send_skb,
45125+ .handle_input = gigaset_m10x_input,
45126 };
45127
45128 /*
45129diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45130index 4d9b195..455075c 100644
45131--- a/drivers/isdn/hardware/avm/b1.c
45132+++ b/drivers/isdn/hardware/avm/b1.c
45133@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45134 }
45135 if (left) {
45136 if (t4file->user) {
45137- if (copy_from_user(buf, dp, left))
45138+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45139 return -EFAULT;
45140 } else {
45141 memcpy(buf, dp, left);
45142@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45143 }
45144 if (left) {
45145 if (config->user) {
45146- if (copy_from_user(buf, dp, left))
45147+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45148 return -EFAULT;
45149 } else {
45150 memcpy(buf, dp, left);
45151diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45152index 9b856e1..fa03c92 100644
45153--- a/drivers/isdn/i4l/isdn_common.c
45154+++ b/drivers/isdn/i4l/isdn_common.c
45155@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45156 } else
45157 return -EINVAL;
45158 case IIOCDBGVAR:
45159+ if (!capable(CAP_SYS_RAWIO))
45160+ return -EPERM;
45161 if (arg) {
45162 if (copy_to_user(argp, &dev, sizeof(ulong)))
45163 return -EFAULT;
45164diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45165index 91d5730..336523e 100644
45166--- a/drivers/isdn/i4l/isdn_concap.c
45167+++ b/drivers/isdn/i4l/isdn_concap.c
45168@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45169 }
45170
45171 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45172- &isdn_concap_dl_data_req,
45173- &isdn_concap_dl_connect_req,
45174- &isdn_concap_dl_disconn_req
45175+ .data_req = &isdn_concap_dl_data_req,
45176+ .connect_req = &isdn_concap_dl_connect_req,
45177+ .disconn_req = &isdn_concap_dl_disconn_req
45178 };
45179
45180 /* The following should better go into a dedicated source file such that
45181diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45182index 3c5f249..5fac4d0 100644
45183--- a/drivers/isdn/i4l/isdn_tty.c
45184+++ b/drivers/isdn/i4l/isdn_tty.c
45185@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45186
45187 #ifdef ISDN_DEBUG_MODEM_OPEN
45188 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45189- port->count);
45190+ atomic_read(&port->count));
45191 #endif
45192- port->count++;
45193+ atomic_inc(&port->count);
45194 port->tty = tty;
45195 /*
45196 * Start up serial port
45197@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45198 #endif
45199 return;
45200 }
45201- if ((tty->count == 1) && (port->count != 1)) {
45202+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45203 /*
45204 * Uh, oh. tty->count is 1, which means that the tty
45205 * structure will be freed. Info->count should always
45206@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45207 * serial port won't be shutdown.
45208 */
45209 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45210- "info->count is %d\n", port->count);
45211- port->count = 1;
45212+ "info->count is %d\n", atomic_read(&port->count));
45213+ atomic_set(&port->count, 1);
45214 }
45215- if (--port->count < 0) {
45216+ if (atomic_dec_return(&port->count) < 0) {
45217 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45218- info->line, port->count);
45219- port->count = 0;
45220+ info->line, atomic_read(&port->count));
45221+ atomic_set(&port->count, 0);
45222 }
45223- if (port->count) {
45224+ if (atomic_read(&port->count)) {
45225 #ifdef ISDN_DEBUG_MODEM_OPEN
45226 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45227 #endif
45228@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45229 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45230 return;
45231 isdn_tty_shutdown(info);
45232- port->count = 0;
45233+ atomic_set(&port->count, 0);
45234 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45235 port->tty = NULL;
45236 wake_up_interruptible(&port->open_wait);
45237@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45238 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45239 modem_info *info = &dev->mdm.info[i];
45240
45241- if (info->port.count == 0)
45242+ if (atomic_read(&info->port.count) == 0)
45243 continue;
45244 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45245 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45246diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45247index e2d4e58..40cd045 100644
45248--- a/drivers/isdn/i4l/isdn_x25iface.c
45249+++ b/drivers/isdn/i4l/isdn_x25iface.c
45250@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45251
45252
45253 static struct concap_proto_ops ix25_pops = {
45254- &isdn_x25iface_proto_new,
45255- &isdn_x25iface_proto_del,
45256- &isdn_x25iface_proto_restart,
45257- &isdn_x25iface_proto_close,
45258- &isdn_x25iface_xmit,
45259- &isdn_x25iface_receive,
45260- &isdn_x25iface_connect_ind,
45261- &isdn_x25iface_disconn_ind
45262+ .proto_new = &isdn_x25iface_proto_new,
45263+ .proto_del = &isdn_x25iface_proto_del,
45264+ .restart = &isdn_x25iface_proto_restart,
45265+ .close = &isdn_x25iface_proto_close,
45266+ .encap_and_xmit = &isdn_x25iface_xmit,
45267+ .data_ind = &isdn_x25iface_receive,
45268+ .connect_ind = &isdn_x25iface_connect_ind,
45269+ .disconn_ind = &isdn_x25iface_disconn_ind
45270 };
45271
45272 /* error message helper function */
45273diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45274index 6a7447c..cae33fe 100644
45275--- a/drivers/isdn/icn/icn.c
45276+++ b/drivers/isdn/icn/icn.c
45277@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45278 if (count > len)
45279 count = len;
45280 if (user) {
45281- if (copy_from_user(msg, buf, count))
45282+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45283 return -EFAULT;
45284 } else
45285 memcpy(msg, buf, count);
45286diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45287index a4f05c5..1433bc5 100644
45288--- a/drivers/isdn/mISDN/dsp_cmx.c
45289+++ b/drivers/isdn/mISDN/dsp_cmx.c
45290@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45291 static u16 dsp_count; /* last sample count */
45292 static int dsp_count_valid; /* if we have last sample count */
45293
45294-void
45295+void __intentional_overflow(-1)
45296 dsp_cmx_send(void *arg)
45297 {
45298 struct dsp_conf *conf;
45299diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45300index f58a354..fbae176 100644
45301--- a/drivers/leds/leds-clevo-mail.c
45302+++ b/drivers/leds/leds-clevo-mail.c
45303@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45304 * detected as working, but in reality it is not) as low as
45305 * possible.
45306 */
45307-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45308+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45309 {
45310 .callback = clevo_mail_led_dmi_callback,
45311 .ident = "Clevo D410J",
45312diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45313index 046cb70..6b20d39 100644
45314--- a/drivers/leds/leds-ss4200.c
45315+++ b/drivers/leds/leds-ss4200.c
45316@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45317 * detected as working, but in reality it is not) as low as
45318 * possible.
45319 */
45320-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45321+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45322 {
45323 .callback = ss4200_led_dmi_callback,
45324 .ident = "Intel SS4200-E",
45325diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45326index 6590558..a74c5dd 100644
45327--- a/drivers/lguest/core.c
45328+++ b/drivers/lguest/core.c
45329@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45330 * The end address needs +1 because __get_vm_area allocates an
45331 * extra guard page, so we need space for that.
45332 */
45333+
45334+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45335+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45336+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45337+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45338+#else
45339 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45340 VM_ALLOC, switcher_addr, switcher_addr
45341 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45342+#endif
45343+
45344 if (!switcher_vma) {
45345 err = -ENOMEM;
45346 printk("lguest: could not map switcher pages high\n");
45347@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45348 * Now the Switcher is mapped at the right address, we can't fail!
45349 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45350 */
45351- memcpy(switcher_vma->addr, start_switcher_text,
45352+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45353 end_switcher_text - start_switcher_text);
45354
45355 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45356diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45357index e8b55c3..3514c37 100644
45358--- a/drivers/lguest/page_tables.c
45359+++ b/drivers/lguest/page_tables.c
45360@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45361 /*:*/
45362
45363 #ifdef CONFIG_X86_PAE
45364-static void release_pmd(pmd_t *spmd)
45365+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45366 {
45367 /* If the entry's not present, there's nothing to release. */
45368 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45369diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45370index 922a1ac..9dd0c2a 100644
45371--- a/drivers/lguest/x86/core.c
45372+++ b/drivers/lguest/x86/core.c
45373@@ -59,7 +59,7 @@ static struct {
45374 /* Offset from where switcher.S was compiled to where we've copied it */
45375 static unsigned long switcher_offset(void)
45376 {
45377- return switcher_addr - (unsigned long)start_switcher_text;
45378+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45379 }
45380
45381 /* This cpu's struct lguest_pages (after the Switcher text page) */
45382@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45383 * These copies are pretty cheap, so we do them unconditionally: */
45384 /* Save the current Host top-level page directory.
45385 */
45386+
45387+#ifdef CONFIG_PAX_PER_CPU_PGD
45388+ pages->state.host_cr3 = read_cr3();
45389+#else
45390 pages->state.host_cr3 = __pa(current->mm->pgd);
45391+#endif
45392+
45393 /*
45394 * Set up the Guest's page tables to see this CPU's pages (and no
45395 * other CPU's pages).
45396@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
45397 * compiled-in switcher code and the high-mapped copy we just made.
45398 */
45399 for (i = 0; i < IDT_ENTRIES; i++)
45400- default_idt_entries[i] += switcher_offset();
45401+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45402
45403 /*
45404 * Set up the Switcher's per-cpu areas.
45405@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
45406 * it will be undisturbed when we switch. To change %cs and jump we
45407 * need this structure to feed to Intel's "lcall" instruction.
45408 */
45409- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45410+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45411 lguest_entry.segment = LGUEST_CS;
45412
45413 /*
45414diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45415index 40634b0..4f5855e 100644
45416--- a/drivers/lguest/x86/switcher_32.S
45417+++ b/drivers/lguest/x86/switcher_32.S
45418@@ -87,6 +87,7 @@
45419 #include <asm/page.h>
45420 #include <asm/segment.h>
45421 #include <asm/lguest.h>
45422+#include <asm/processor-flags.h>
45423
45424 // We mark the start of the code to copy
45425 // It's placed in .text tho it's never run here
45426@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45427 // Changes type when we load it: damn Intel!
45428 // For after we switch over our page tables
45429 // That entry will be read-only: we'd crash.
45430+
45431+#ifdef CONFIG_PAX_KERNEXEC
45432+ mov %cr0, %edx
45433+ xor $X86_CR0_WP, %edx
45434+ mov %edx, %cr0
45435+#endif
45436+
45437 movl $(GDT_ENTRY_TSS*8), %edx
45438 ltr %dx
45439
45440@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45441 // Let's clear it again for our return.
45442 // The GDT descriptor of the Host
45443 // Points to the table after two "size" bytes
45444- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45445+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45446 // Clear "used" from type field (byte 5, bit 2)
45447- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45448+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45449+
45450+#ifdef CONFIG_PAX_KERNEXEC
45451+ mov %cr0, %eax
45452+ xor $X86_CR0_WP, %eax
45453+ mov %eax, %cr0
45454+#endif
45455
45456 // Once our page table's switched, the Guest is live!
45457 // The Host fades as we run this final step.
45458@@ -295,13 +309,12 @@ deliver_to_host:
45459 // I consulted gcc, and it gave
45460 // These instructions, which I gladly credit:
45461 leal (%edx,%ebx,8), %eax
45462- movzwl (%eax),%edx
45463- movl 4(%eax), %eax
45464- xorw %ax, %ax
45465- orl %eax, %edx
45466+ movl 4(%eax), %edx
45467+ movw (%eax), %dx
45468 // Now the address of the handler's in %edx
45469 // We call it now: its "iret" drops us home.
45470- jmp *%edx
45471+ ljmp $__KERNEL_CS, $1f
45472+1: jmp *%edx
45473
45474 // Every interrupt can come to us here
45475 // But we must truly tell each apart.
45476diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45477index a08e3ee..df8ade2 100644
45478--- a/drivers/md/bcache/closure.h
45479+++ b/drivers/md/bcache/closure.h
45480@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45481 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45482 struct workqueue_struct *wq)
45483 {
45484- BUG_ON(object_is_on_stack(cl));
45485+ BUG_ON(object_starts_on_stack(cl));
45486 closure_set_ip(cl);
45487 cl->fn = fn;
45488 cl->wq = wq;
45489diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45490index 67f8b31..9418f2b 100644
45491--- a/drivers/md/bitmap.c
45492+++ b/drivers/md/bitmap.c
45493@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45494 chunk_kb ? "KB" : "B");
45495 if (bitmap->storage.file) {
45496 seq_printf(seq, ", file: ");
45497- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45498+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45499 }
45500
45501 seq_printf(seq, "\n");
45502diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45503index 5152142..623d141 100644
45504--- a/drivers/md/dm-ioctl.c
45505+++ b/drivers/md/dm-ioctl.c
45506@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45507 cmd == DM_LIST_VERSIONS_CMD)
45508 return 0;
45509
45510- if ((cmd == DM_DEV_CREATE_CMD)) {
45511+ if (cmd == DM_DEV_CREATE_CMD) {
45512 if (!*param->name) {
45513 DMWARN("name not supplied when creating device");
45514 return -EINVAL;
45515diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45516index 7dfdb5c..4caada6 100644
45517--- a/drivers/md/dm-raid1.c
45518+++ b/drivers/md/dm-raid1.c
45519@@ -40,7 +40,7 @@ enum dm_raid1_error {
45520
45521 struct mirror {
45522 struct mirror_set *ms;
45523- atomic_t error_count;
45524+ atomic_unchecked_t error_count;
45525 unsigned long error_type;
45526 struct dm_dev *dev;
45527 sector_t offset;
45528@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45529 struct mirror *m;
45530
45531 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45532- if (!atomic_read(&m->error_count))
45533+ if (!atomic_read_unchecked(&m->error_count))
45534 return m;
45535
45536 return NULL;
45537@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45538 * simple way to tell if a device has encountered
45539 * errors.
45540 */
45541- atomic_inc(&m->error_count);
45542+ atomic_inc_unchecked(&m->error_count);
45543
45544 if (test_and_set_bit(error_type, &m->error_type))
45545 return;
45546@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45547 struct mirror *m = get_default_mirror(ms);
45548
45549 do {
45550- if (likely(!atomic_read(&m->error_count)))
45551+ if (likely(!atomic_read_unchecked(&m->error_count)))
45552 return m;
45553
45554 if (m-- == ms->mirror)
45555@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45556 {
45557 struct mirror *default_mirror = get_default_mirror(m->ms);
45558
45559- return !atomic_read(&default_mirror->error_count);
45560+ return !atomic_read_unchecked(&default_mirror->error_count);
45561 }
45562
45563 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45564@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45565 */
45566 if (likely(region_in_sync(ms, region, 1)))
45567 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45568- else if (m && atomic_read(&m->error_count))
45569+ else if (m && atomic_read_unchecked(&m->error_count))
45570 m = NULL;
45571
45572 if (likely(m))
45573@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45574 }
45575
45576 ms->mirror[mirror].ms = ms;
45577- atomic_set(&(ms->mirror[mirror].error_count), 0);
45578+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45579 ms->mirror[mirror].error_type = 0;
45580 ms->mirror[mirror].offset = offset;
45581
45582@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
45583 */
45584 static char device_status_char(struct mirror *m)
45585 {
45586- if (!atomic_read(&(m->error_count)))
45587+ if (!atomic_read_unchecked(&(m->error_count)))
45588 return 'A';
45589
45590 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45591diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45592index 28a9012..9c0f6a5 100644
45593--- a/drivers/md/dm-stats.c
45594+++ b/drivers/md/dm-stats.c
45595@@ -382,7 +382,7 @@ do_sync_free:
45596 synchronize_rcu_expedited();
45597 dm_stat_free(&s->rcu_head);
45598 } else {
45599- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45600+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45601 call_rcu(&s->rcu_head, dm_stat_free);
45602 }
45603 return 0;
45604@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45605 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45606 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45607 ));
45608- ACCESS_ONCE(last->last_sector) = end_sector;
45609- ACCESS_ONCE(last->last_rw) = bi_rw;
45610+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45611+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45612 }
45613
45614 rcu_read_lock();
45615diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45616index d1600d2..4c3af3a 100644
45617--- a/drivers/md/dm-stripe.c
45618+++ b/drivers/md/dm-stripe.c
45619@@ -21,7 +21,7 @@ struct stripe {
45620 struct dm_dev *dev;
45621 sector_t physical_start;
45622
45623- atomic_t error_count;
45624+ atomic_unchecked_t error_count;
45625 };
45626
45627 struct stripe_c {
45628@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45629 kfree(sc);
45630 return r;
45631 }
45632- atomic_set(&(sc->stripe[i].error_count), 0);
45633+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45634 }
45635
45636 ti->private = sc;
45637@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45638 DMEMIT("%d ", sc->stripes);
45639 for (i = 0; i < sc->stripes; i++) {
45640 DMEMIT("%s ", sc->stripe[i].dev->name);
45641- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45642+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45643 'D' : 'A';
45644 }
45645 buffer[i] = '\0';
45646@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45647 */
45648 for (i = 0; i < sc->stripes; i++)
45649 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45650- atomic_inc(&(sc->stripe[i].error_count));
45651- if (atomic_read(&(sc->stripe[i].error_count)) <
45652+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45653+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45654 DM_IO_ERROR_THRESHOLD)
45655 schedule_work(&sc->trigger_event);
45656 }
45657diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45658index f9c6cb8..e272df6 100644
45659--- a/drivers/md/dm-table.c
45660+++ b/drivers/md/dm-table.c
45661@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
45662 static int open_dev(struct dm_dev_internal *d, dev_t dev,
45663 struct mapped_device *md)
45664 {
45665- static char *_claim_ptr = "I belong to device-mapper";
45666+ static char _claim_ptr[] = "I belong to device-mapper";
45667 struct block_device *bdev;
45668
45669 int r;
45670@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45671 if (!dev_size)
45672 return 0;
45673
45674- if ((start >= dev_size) || (start + len > dev_size)) {
45675+ if ((start >= dev_size) || (len > dev_size - start)) {
45676 DMWARN("%s: %s too small for target: "
45677 "start=%llu, len=%llu, dev_size=%llu",
45678 dm_device_name(ti->table->md), bdevname(bdev, b),
45679diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45680index e9d33ad..dae9880d 100644
45681--- a/drivers/md/dm-thin-metadata.c
45682+++ b/drivers/md/dm-thin-metadata.c
45683@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45684 {
45685 pmd->info.tm = pmd->tm;
45686 pmd->info.levels = 2;
45687- pmd->info.value_type.context = pmd->data_sm;
45688+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45689 pmd->info.value_type.size = sizeof(__le64);
45690 pmd->info.value_type.inc = data_block_inc;
45691 pmd->info.value_type.dec = data_block_dec;
45692@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45693
45694 pmd->bl_info.tm = pmd->tm;
45695 pmd->bl_info.levels = 1;
45696- pmd->bl_info.value_type.context = pmd->data_sm;
45697+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45698 pmd->bl_info.value_type.size = sizeof(__le64);
45699 pmd->bl_info.value_type.inc = data_block_inc;
45700 pmd->bl_info.value_type.dec = data_block_dec;
45701diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45702index 32b958d..34011e8 100644
45703--- a/drivers/md/dm.c
45704+++ b/drivers/md/dm.c
45705@@ -180,9 +180,9 @@ struct mapped_device {
45706 /*
45707 * Event handling.
45708 */
45709- atomic_t event_nr;
45710+ atomic_unchecked_t event_nr;
45711 wait_queue_head_t eventq;
45712- atomic_t uevent_seq;
45713+ atomic_unchecked_t uevent_seq;
45714 struct list_head uevent_list;
45715 spinlock_t uevent_lock; /* Protect access to uevent_list */
45716
45717@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
45718 spin_lock_init(&md->deferred_lock);
45719 atomic_set(&md->holders, 1);
45720 atomic_set(&md->open_count, 0);
45721- atomic_set(&md->event_nr, 0);
45722- atomic_set(&md->uevent_seq, 0);
45723+ atomic_set_unchecked(&md->event_nr, 0);
45724+ atomic_set_unchecked(&md->uevent_seq, 0);
45725 INIT_LIST_HEAD(&md->uevent_list);
45726 spin_lock_init(&md->uevent_lock);
45727
45728@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
45729
45730 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45731
45732- atomic_inc(&md->event_nr);
45733+ atomic_inc_unchecked(&md->event_nr);
45734 wake_up(&md->eventq);
45735 }
45736
45737@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45738
45739 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45740 {
45741- return atomic_add_return(1, &md->uevent_seq);
45742+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45743 }
45744
45745 uint32_t dm_get_event_nr(struct mapped_device *md)
45746 {
45747- return atomic_read(&md->event_nr);
45748+ return atomic_read_unchecked(&md->event_nr);
45749 }
45750
45751 int dm_wait_event(struct mapped_device *md, int event_nr)
45752 {
45753 return wait_event_interruptible(md->eventq,
45754- (event_nr != atomic_read(&md->event_nr)));
45755+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45756 }
45757
45758 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45759diff --git a/drivers/md/md.c b/drivers/md/md.c
45760index 1294238..a442227 100644
45761--- a/drivers/md/md.c
45762+++ b/drivers/md/md.c
45763@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45764 * start build, activate spare
45765 */
45766 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45767-static atomic_t md_event_count;
45768+static atomic_unchecked_t md_event_count;
45769 void md_new_event(struct mddev *mddev)
45770 {
45771- atomic_inc(&md_event_count);
45772+ atomic_inc_unchecked(&md_event_count);
45773 wake_up(&md_event_waiters);
45774 }
45775 EXPORT_SYMBOL_GPL(md_new_event);
45776@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45777 */
45778 static void md_new_event_inintr(struct mddev *mddev)
45779 {
45780- atomic_inc(&md_event_count);
45781+ atomic_inc_unchecked(&md_event_count);
45782 wake_up(&md_event_waiters);
45783 }
45784
45785@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45786 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45787 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45788 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45789- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45790+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45791
45792 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45793 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45794@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45795 else
45796 sb->resync_offset = cpu_to_le64(0);
45797
45798- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45799+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45800
45801 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45802 sb->size = cpu_to_le64(mddev->dev_sectors);
45803@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
45804 static ssize_t
45805 errors_show(struct md_rdev *rdev, char *page)
45806 {
45807- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45808+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45809 }
45810
45811 static ssize_t
45812@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45813 char *e;
45814 unsigned long n = simple_strtoul(buf, &e, 10);
45815 if (*buf && (*e == 0 || *e == '\n')) {
45816- atomic_set(&rdev->corrected_errors, n);
45817+ atomic_set_unchecked(&rdev->corrected_errors, n);
45818 return len;
45819 }
45820 return -EINVAL;
45821@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
45822 rdev->sb_loaded = 0;
45823 rdev->bb_page = NULL;
45824 atomic_set(&rdev->nr_pending, 0);
45825- atomic_set(&rdev->read_errors, 0);
45826- atomic_set(&rdev->corrected_errors, 0);
45827+ atomic_set_unchecked(&rdev->read_errors, 0);
45828+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45829
45830 INIT_LIST_HEAD(&rdev->same_set);
45831 init_waitqueue_head(&rdev->blocked_wait);
45832@@ -7068,7 +7068,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45833
45834 spin_unlock(&pers_lock);
45835 seq_printf(seq, "\n");
45836- seq->poll_event = atomic_read(&md_event_count);
45837+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45838 return 0;
45839 }
45840 if (v == (void*)2) {
45841@@ -7171,7 +7171,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45842 return error;
45843
45844 seq = file->private_data;
45845- seq->poll_event = atomic_read(&md_event_count);
45846+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45847 return error;
45848 }
45849
45850@@ -7188,7 +7188,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45851 /* always allow read */
45852 mask = POLLIN | POLLRDNORM;
45853
45854- if (seq->poll_event != atomic_read(&md_event_count))
45855+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45856 mask |= POLLERR | POLLPRI;
45857 return mask;
45858 }
45859@@ -7232,7 +7232,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45860 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45861 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45862 (int)part_stat_read(&disk->part0, sectors[1]) -
45863- atomic_read(&disk->sync_io);
45864+ atomic_read_unchecked(&disk->sync_io);
45865 /* sync IO will cause sync_io to increase before the disk_stats
45866 * as sync_io is counted when a request starts, and
45867 * disk_stats is counted when it completes.
45868diff --git a/drivers/md/md.h b/drivers/md/md.h
45869index a49d991..3582bb7 100644
45870--- a/drivers/md/md.h
45871+++ b/drivers/md/md.h
45872@@ -94,13 +94,13 @@ struct md_rdev {
45873 * only maintained for arrays that
45874 * support hot removal
45875 */
45876- atomic_t read_errors; /* number of consecutive read errors that
45877+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45878 * we have tried to ignore.
45879 */
45880 struct timespec last_read_error; /* monotonic time since our
45881 * last read error
45882 */
45883- atomic_t corrected_errors; /* number of corrected read errors,
45884+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45885 * for reporting to userspace and storing
45886 * in superblock.
45887 */
45888@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
45889
45890 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45891 {
45892- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45893+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45894 }
45895
45896 struct md_personality
45897diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45898index 786b689..ea8c956 100644
45899--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45900+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45901@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45902 * Flick into a mode where all blocks get allocated in the new area.
45903 */
45904 smm->begin = old_len;
45905- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45906+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45907
45908 /*
45909 * Extend.
45910@@ -710,7 +710,7 @@ out:
45911 /*
45912 * Switch back to normal behaviour.
45913 */
45914- memcpy(sm, &ops, sizeof(*sm));
45915+ memcpy((void *)sm, &ops, sizeof(*sm));
45916 return r;
45917 }
45918
45919diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45920index 3e6d115..ffecdeb 100644
45921--- a/drivers/md/persistent-data/dm-space-map.h
45922+++ b/drivers/md/persistent-data/dm-space-map.h
45923@@ -71,6 +71,7 @@ struct dm_space_map {
45924 dm_sm_threshold_fn fn,
45925 void *context);
45926 };
45927+typedef struct dm_space_map __no_const dm_space_map_no_const;
45928
45929 /*----------------------------------------------------------------*/
45930
45931diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45932index 55de4f6..b1c57fe 100644
45933--- a/drivers/md/raid1.c
45934+++ b/drivers/md/raid1.c
45935@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45936 if (r1_sync_page_io(rdev, sect, s,
45937 bio->bi_io_vec[idx].bv_page,
45938 READ) != 0)
45939- atomic_add(s, &rdev->corrected_errors);
45940+ atomic_add_unchecked(s, &rdev->corrected_errors);
45941 }
45942 sectors -= s;
45943 sect += s;
45944@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45945 !test_bit(Faulty, &rdev->flags)) {
45946 if (r1_sync_page_io(rdev, sect, s,
45947 conf->tmppage, READ)) {
45948- atomic_add(s, &rdev->corrected_errors);
45949+ atomic_add_unchecked(s, &rdev->corrected_errors);
45950 printk(KERN_INFO
45951 "md/raid1:%s: read error corrected "
45952 "(%d sectors at %llu on %s)\n",
45953diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45954index 6703751..187af1e 100644
45955--- a/drivers/md/raid10.c
45956+++ b/drivers/md/raid10.c
45957@@ -1948,7 +1948,7 @@ static void end_sync_read(struct bio *bio, int error)
45958 /* The write handler will notice the lack of
45959 * R10BIO_Uptodate and record any errors etc
45960 */
45961- atomic_add(r10_bio->sectors,
45962+ atomic_add_unchecked(r10_bio->sectors,
45963 &conf->mirrors[d].rdev->corrected_errors);
45964
45965 /* for reconstruct, we always reschedule after a read.
45966@@ -2306,7 +2306,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45967 {
45968 struct timespec cur_time_mon;
45969 unsigned long hours_since_last;
45970- unsigned int read_errors = atomic_read(&rdev->read_errors);
45971+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45972
45973 ktime_get_ts(&cur_time_mon);
45974
45975@@ -2328,9 +2328,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45976 * overflowing the shift of read_errors by hours_since_last.
45977 */
45978 if (hours_since_last >= 8 * sizeof(read_errors))
45979- atomic_set(&rdev->read_errors, 0);
45980+ atomic_set_unchecked(&rdev->read_errors, 0);
45981 else
45982- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45983+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45984 }
45985
45986 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45987@@ -2384,8 +2384,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45988 return;
45989
45990 check_decay_read_errors(mddev, rdev);
45991- atomic_inc(&rdev->read_errors);
45992- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45993+ atomic_inc_unchecked(&rdev->read_errors);
45994+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45995 char b[BDEVNAME_SIZE];
45996 bdevname(rdev->bdev, b);
45997
45998@@ -2393,7 +2393,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45999 "md/raid10:%s: %s: Raid device exceeded "
46000 "read_error threshold [cur %d:max %d]\n",
46001 mdname(mddev), b,
46002- atomic_read(&rdev->read_errors), max_read_errors);
46003+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
46004 printk(KERN_NOTICE
46005 "md/raid10:%s: %s: Failing raid device\n",
46006 mdname(mddev), b);
46007@@ -2548,7 +2548,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
46008 sect +
46009 choose_data_offset(r10_bio, rdev)),
46010 bdevname(rdev->bdev, b));
46011- atomic_add(s, &rdev->corrected_errors);
46012+ atomic_add_unchecked(s, &rdev->corrected_errors);
46013 }
46014
46015 rdev_dec_pending(rdev, mddev);
46016diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
46017index 9f0fbec..991e7a1 100644
46018--- a/drivers/md/raid5.c
46019+++ b/drivers/md/raid5.c
46020@@ -1735,6 +1735,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
46021 return 1;
46022 }
46023
46024+#ifdef CONFIG_GRKERNSEC_HIDESYM
46025+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46026+#endif
46027+
46028 static int grow_stripes(struct r5conf *conf, int num)
46029 {
46030 struct kmem_cache *sc;
46031@@ -1746,7 +1750,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46032 "raid%d-%s", conf->level, mdname(conf->mddev));
46033 else
46034 sprintf(conf->cache_name[0],
46035+#ifdef CONFIG_GRKERNSEC_HIDESYM
46036+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46037+#else
46038 "raid%d-%p", conf->level, conf->mddev);
46039+#endif
46040 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46041
46042 conf->active_name = 0;
46043@@ -2022,21 +2030,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46044 mdname(conf->mddev), STRIPE_SECTORS,
46045 (unsigned long long)s,
46046 bdevname(rdev->bdev, b));
46047- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46048+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46049 clear_bit(R5_ReadError, &sh->dev[i].flags);
46050 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46051 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46052 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46053
46054- if (atomic_read(&rdev->read_errors))
46055- atomic_set(&rdev->read_errors, 0);
46056+ if (atomic_read_unchecked(&rdev->read_errors))
46057+ atomic_set_unchecked(&rdev->read_errors, 0);
46058 } else {
46059 const char *bdn = bdevname(rdev->bdev, b);
46060 int retry = 0;
46061 int set_bad = 0;
46062
46063 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46064- atomic_inc(&rdev->read_errors);
46065+ atomic_inc_unchecked(&rdev->read_errors);
46066 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46067 printk_ratelimited(
46068 KERN_WARNING
46069@@ -2064,7 +2072,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46070 mdname(conf->mddev),
46071 (unsigned long long)s,
46072 bdn);
46073- } else if (atomic_read(&rdev->read_errors)
46074+ } else if (atomic_read_unchecked(&rdev->read_errors)
46075 > conf->max_nr_stripes)
46076 printk(KERN_WARNING
46077 "md/raid:%s: Too many read errors, failing device %s.\n",
46078diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46079index 983db75..ef9248c 100644
46080--- a/drivers/media/dvb-core/dvbdev.c
46081+++ b/drivers/media/dvb-core/dvbdev.c
46082@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46083 const struct dvb_device *template, void *priv, int type)
46084 {
46085 struct dvb_device *dvbdev;
46086- struct file_operations *dvbdevfops;
46087+ file_operations_no_const *dvbdevfops;
46088 struct device *clsdev;
46089 int minor;
46090 int id;
46091diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46092index 539f4db..cdd403b 100644
46093--- a/drivers/media/dvb-frontends/af9033.h
46094+++ b/drivers/media/dvb-frontends/af9033.h
46095@@ -82,7 +82,7 @@ struct af9033_ops {
46096 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46097 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46098 int onoff);
46099-};
46100+} __no_const;
46101
46102
46103 #if IS_ENABLED(CONFIG_DVB_AF9033)
46104diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46105index 9b6c3bb..baeb5c7 100644
46106--- a/drivers/media/dvb-frontends/dib3000.h
46107+++ b/drivers/media/dvb-frontends/dib3000.h
46108@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46109 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46110 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46111 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46112-};
46113+} __no_const;
46114
46115 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46116 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46117diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
46118index 1fea0e9..321ce8f 100644
46119--- a/drivers/media/dvb-frontends/dib7000p.h
46120+++ b/drivers/media/dvb-frontends/dib7000p.h
46121@@ -64,7 +64,7 @@ struct dib7000p_ops {
46122 int (*get_adc_power)(struct dvb_frontend *fe);
46123 int (*slave_reset)(struct dvb_frontend *fe);
46124 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
46125-};
46126+} __no_const;
46127
46128 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
46129 void *dib7000p_attach(struct dib7000p_ops *ops);
46130diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
46131index 84cc103..5780c54 100644
46132--- a/drivers/media/dvb-frontends/dib8000.h
46133+++ b/drivers/media/dvb-frontends/dib8000.h
46134@@ -61,7 +61,7 @@ struct dib8000_ops {
46135 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
46136 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
46137 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
46138-};
46139+} __no_const;
46140
46141 #if IS_ENABLED(CONFIG_DVB_DIB8000)
46142 void *dib8000_attach(struct dib8000_ops *ops);
46143diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46144index ed8cb90..5ef7f79 100644
46145--- a/drivers/media/pci/cx88/cx88-video.c
46146+++ b/drivers/media/pci/cx88/cx88-video.c
46147@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46148
46149 /* ------------------------------------------------------------------ */
46150
46151-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46152-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46153-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46154+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46155+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46156+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46157
46158 module_param_array(video_nr, int, NULL, 0444);
46159 module_param_array(vbi_nr, int, NULL, 0444);
46160diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46161index 802642d..5534900 100644
46162--- a/drivers/media/pci/ivtv/ivtv-driver.c
46163+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46164@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46165 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46166
46167 /* ivtv instance counter */
46168-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46169+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46170
46171 /* Parameter declarations */
46172 static int cardtype[IVTV_MAX_CARDS];
46173diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
46174index 172583d..0f806f4 100644
46175--- a/drivers/media/pci/solo6x10/solo6x10-core.c
46176+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
46177@@ -430,7 +430,7 @@ static void solo_device_release(struct device *dev)
46178
46179 static int solo_sysfs_init(struct solo_dev *solo_dev)
46180 {
46181- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46182+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46183 struct device *dev = &solo_dev->dev;
46184 const char *driver;
46185 int i;
46186diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
46187index c7141f2..5301fec 100644
46188--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
46189+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
46190@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
46191
46192 int solo_g723_init(struct solo_dev *solo_dev)
46193 {
46194- static struct snd_device_ops ops = { NULL };
46195+ static struct snd_device_ops ops = { };
46196 struct snd_card *card;
46197 struct snd_kcontrol_new kctl;
46198 char name[32];
46199diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46200index 8c84846..27b4f83 100644
46201--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
46202+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46203@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
46204
46205 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
46206 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
46207- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
46208+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
46209 if (p2m_id < 0)
46210 p2m_id = -p2m_id;
46211 }
46212diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
46213index c6154b0..73e4ae9 100644
46214--- a/drivers/media/pci/solo6x10/solo6x10.h
46215+++ b/drivers/media/pci/solo6x10/solo6x10.h
46216@@ -219,7 +219,7 @@ struct solo_dev {
46217
46218 /* P2M DMA Engine */
46219 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
46220- atomic_t p2m_count;
46221+ atomic_unchecked_t p2m_count;
46222 int p2m_jiffies;
46223 unsigned int p2m_timeouts;
46224
46225diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46226index 2d177fa..5b925a1 100644
46227--- a/drivers/media/platform/omap/omap_vout.c
46228+++ b/drivers/media/platform/omap/omap_vout.c
46229@@ -63,7 +63,6 @@ enum omap_vout_channels {
46230 OMAP_VIDEO2,
46231 };
46232
46233-static struct videobuf_queue_ops video_vbq_ops;
46234 /* Variables configurable through module params*/
46235 static u32 video1_numbuffers = 3;
46236 static u32 video2_numbuffers = 3;
46237@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
46238 {
46239 struct videobuf_queue *q;
46240 struct omap_vout_device *vout = NULL;
46241+ static struct videobuf_queue_ops video_vbq_ops = {
46242+ .buf_setup = omap_vout_buffer_setup,
46243+ .buf_prepare = omap_vout_buffer_prepare,
46244+ .buf_release = omap_vout_buffer_release,
46245+ .buf_queue = omap_vout_buffer_queue,
46246+ };
46247
46248 vout = video_drvdata(file);
46249 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46250@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
46251 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46252
46253 q = &vout->vbq;
46254- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46255- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46256- video_vbq_ops.buf_release = omap_vout_buffer_release;
46257- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46258 spin_lock_init(&vout->vbq_lock);
46259
46260 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46261diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46262index fb2acc5..a2fcbdc4 100644
46263--- a/drivers/media/platform/s5p-tv/mixer.h
46264+++ b/drivers/media/platform/s5p-tv/mixer.h
46265@@ -156,7 +156,7 @@ struct mxr_layer {
46266 /** layer index (unique identifier) */
46267 int idx;
46268 /** callbacks for layer methods */
46269- struct mxr_layer_ops ops;
46270+ struct mxr_layer_ops *ops;
46271 /** format array */
46272 const struct mxr_format **fmt_array;
46273 /** size of format array */
46274diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46275index 74344c7..a39e70e 100644
46276--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46277+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46278@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46279 {
46280 struct mxr_layer *layer;
46281 int ret;
46282- struct mxr_layer_ops ops = {
46283+ static struct mxr_layer_ops ops = {
46284 .release = mxr_graph_layer_release,
46285 .buffer_set = mxr_graph_buffer_set,
46286 .stream_set = mxr_graph_stream_set,
46287diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46288index b713403..53cb5ad 100644
46289--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46290+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46291@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46292 layer->update_buf = next;
46293 }
46294
46295- layer->ops.buffer_set(layer, layer->update_buf);
46296+ layer->ops->buffer_set(layer, layer->update_buf);
46297
46298 if (done && done != layer->shadow_buf)
46299 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46300diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46301index b4d2696..91df48e 100644
46302--- a/drivers/media/platform/s5p-tv/mixer_video.c
46303+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46304@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46305 layer->geo.src.height = layer->geo.src.full_height;
46306
46307 mxr_geometry_dump(mdev, &layer->geo);
46308- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46309+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46310 mxr_geometry_dump(mdev, &layer->geo);
46311 }
46312
46313@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46314 layer->geo.dst.full_width = mbus_fmt.width;
46315 layer->geo.dst.full_height = mbus_fmt.height;
46316 layer->geo.dst.field = mbus_fmt.field;
46317- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46318+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46319
46320 mxr_geometry_dump(mdev, &layer->geo);
46321 }
46322@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46323 /* set source size to highest accepted value */
46324 geo->src.full_width = max(geo->dst.full_width, pix->width);
46325 geo->src.full_height = max(geo->dst.full_height, pix->height);
46326- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46327+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46328 mxr_geometry_dump(mdev, &layer->geo);
46329 /* set cropping to total visible screen */
46330 geo->src.width = pix->width;
46331@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46332 geo->src.x_offset = 0;
46333 geo->src.y_offset = 0;
46334 /* assure consistency of geometry */
46335- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46336+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46337 mxr_geometry_dump(mdev, &layer->geo);
46338 /* set full size to lowest possible value */
46339 geo->src.full_width = 0;
46340 geo->src.full_height = 0;
46341- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46342+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46343 mxr_geometry_dump(mdev, &layer->geo);
46344
46345 /* returning results */
46346@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46347 target->width = s->r.width;
46348 target->height = s->r.height;
46349
46350- layer->ops.fix_geometry(layer, stage, s->flags);
46351+ layer->ops->fix_geometry(layer, stage, s->flags);
46352
46353 /* retrieve update selection rectangle */
46354 res.left = target->x_offset;
46355@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46356 mxr_output_get(mdev);
46357
46358 mxr_layer_update_output(layer);
46359- layer->ops.format_set(layer);
46360+ layer->ops->format_set(layer);
46361 /* enabling layer in hardware */
46362 spin_lock_irqsave(&layer->enq_slock, flags);
46363 layer->state = MXR_LAYER_STREAMING;
46364 spin_unlock_irqrestore(&layer->enq_slock, flags);
46365
46366- layer->ops.stream_set(layer, MXR_ENABLE);
46367+ layer->ops->stream_set(layer, MXR_ENABLE);
46368 mxr_streamer_get(mdev);
46369
46370 return 0;
46371@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
46372 spin_unlock_irqrestore(&layer->enq_slock, flags);
46373
46374 /* disabling layer in hardware */
46375- layer->ops.stream_set(layer, MXR_DISABLE);
46376+ layer->ops->stream_set(layer, MXR_DISABLE);
46377 /* remove one streamer */
46378 mxr_streamer_put(mdev);
46379 /* allow changes in output configuration */
46380@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46381
46382 void mxr_layer_release(struct mxr_layer *layer)
46383 {
46384- if (layer->ops.release)
46385- layer->ops.release(layer);
46386+ if (layer->ops->release)
46387+ layer->ops->release(layer);
46388 }
46389
46390 void mxr_base_layer_release(struct mxr_layer *layer)
46391@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46392
46393 layer->mdev = mdev;
46394 layer->idx = idx;
46395- layer->ops = *ops;
46396+ layer->ops = ops;
46397
46398 spin_lock_init(&layer->enq_slock);
46399 INIT_LIST_HEAD(&layer->enq_list);
46400diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46401index c9388c4..ce71ece 100644
46402--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46403+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46404@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46405 {
46406 struct mxr_layer *layer;
46407 int ret;
46408- struct mxr_layer_ops ops = {
46409+ static struct mxr_layer_ops ops = {
46410 .release = mxr_vp_layer_release,
46411 .buffer_set = mxr_vp_buffer_set,
46412 .stream_set = mxr_vp_stream_set,
46413diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
46414index 8033371..de5bca0 100644
46415--- a/drivers/media/platform/vivi.c
46416+++ b/drivers/media/platform/vivi.c
46417@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
46418 MODULE_LICENSE("Dual BSD/GPL");
46419 MODULE_VERSION(VIVI_VERSION);
46420
46421-static unsigned video_nr = -1;
46422-module_param(video_nr, uint, 0644);
46423+static int video_nr = -1;
46424+module_param(video_nr, int, 0644);
46425 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
46426
46427 static unsigned n_devs = 1;
46428diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46429index 82affae..42833ec 100644
46430--- a/drivers/media/radio/radio-cadet.c
46431+++ b/drivers/media/radio/radio-cadet.c
46432@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46433 unsigned char readbuf[RDS_BUFFER];
46434 int i = 0;
46435
46436+ if (count > RDS_BUFFER)
46437+ return -EFAULT;
46438 mutex_lock(&dev->lock);
46439 if (dev->rdsstat == 0)
46440 cadet_start_rds(dev);
46441@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46442 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46443 mutex_unlock(&dev->lock);
46444
46445- if (i && copy_to_user(data, readbuf, i))
46446- return -EFAULT;
46447+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46448+ i = -EFAULT;
46449+
46450 return i;
46451 }
46452
46453diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46454index 5236035..c622c74 100644
46455--- a/drivers/media/radio/radio-maxiradio.c
46456+++ b/drivers/media/radio/radio-maxiradio.c
46457@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46458 /* TEA5757 pin mappings */
46459 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46460
46461-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46462+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46463
46464 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46465 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46466diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46467index 050b3bb..79f62b9 100644
46468--- a/drivers/media/radio/radio-shark.c
46469+++ b/drivers/media/radio/radio-shark.c
46470@@ -79,7 +79,7 @@ struct shark_device {
46471 u32 last_val;
46472 };
46473
46474-static atomic_t shark_instance = ATOMIC_INIT(0);
46475+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46476
46477 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46478 {
46479diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46480index 8654e0d..0608a64 100644
46481--- a/drivers/media/radio/radio-shark2.c
46482+++ b/drivers/media/radio/radio-shark2.c
46483@@ -74,7 +74,7 @@ struct shark_device {
46484 u8 *transfer_buffer;
46485 };
46486
46487-static atomic_t shark_instance = ATOMIC_INIT(0);
46488+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46489
46490 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46491 {
46492diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46493index 633022b..7f10754 100644
46494--- a/drivers/media/radio/radio-si476x.c
46495+++ b/drivers/media/radio/radio-si476x.c
46496@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46497 struct si476x_radio *radio;
46498 struct v4l2_ctrl *ctrl;
46499
46500- static atomic_t instance = ATOMIC_INIT(0);
46501+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46502
46503 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46504 if (!radio)
46505diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46506index 9fd1527..8927230 100644
46507--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46508+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46509@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46510
46511 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46512 {
46513- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46514- char result[64];
46515- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46516- sizeof(result), 0);
46517+ char *buf;
46518+ char *result;
46519+ int retval;
46520+
46521+ buf = kmalloc(2, GFP_KERNEL);
46522+ if (buf == NULL)
46523+ return -ENOMEM;
46524+ result = kmalloc(64, GFP_KERNEL);
46525+ if (result == NULL) {
46526+ kfree(buf);
46527+ return -ENOMEM;
46528+ }
46529+
46530+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46531+ buf[1] = enable ? 1 : 0;
46532+
46533+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46534+
46535+ kfree(buf);
46536+ kfree(result);
46537+ return retval;
46538 }
46539
46540 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46541 {
46542- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46543- char state[3];
46544- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46545+ char *buf;
46546+ char *state;
46547+ int retval;
46548+
46549+ buf = kmalloc(2, GFP_KERNEL);
46550+ if (buf == NULL)
46551+ return -ENOMEM;
46552+ state = kmalloc(3, GFP_KERNEL);
46553+ if (state == NULL) {
46554+ kfree(buf);
46555+ return -ENOMEM;
46556+ }
46557+
46558+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46559+ buf[1] = enable ? 1 : 0;
46560+
46561+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46562+
46563+ kfree(buf);
46564+ kfree(state);
46565+ return retval;
46566 }
46567
46568 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46569 {
46570- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46571- char state[3];
46572+ char *query;
46573+ char *state;
46574 int ret;
46575+ query = kmalloc(1, GFP_KERNEL);
46576+ if (query == NULL)
46577+ return -ENOMEM;
46578+ state = kmalloc(3, GFP_KERNEL);
46579+ if (state == NULL) {
46580+ kfree(query);
46581+ return -ENOMEM;
46582+ }
46583+
46584+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46585
46586 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46587
46588- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46589- sizeof(state), 0);
46590+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46591 if (ret < 0) {
46592 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46593 "state info\n");
46594@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46595
46596 /* Copy this pointer as we are gonna need it in the release phase */
46597 cinergyt2_usb_device = adap->dev;
46598-
46599+ kfree(query);
46600+ kfree(state);
46601 return 0;
46602 }
46603
46604@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46605 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46606 {
46607 struct cinergyt2_state *st = d->priv;
46608- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46609+ u8 *key, *cmd;
46610 int i;
46611
46612+ cmd = kmalloc(1, GFP_KERNEL);
46613+ if (cmd == NULL)
46614+ return -EINVAL;
46615+ key = kzalloc(5, GFP_KERNEL);
46616+ if (key == NULL) {
46617+ kfree(cmd);
46618+ return -EINVAL;
46619+ }
46620+
46621+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46622+
46623 *state = REMOTE_NO_KEY_PRESSED;
46624
46625- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46626+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46627 if (key[4] == 0xff) {
46628 /* key repeat */
46629 st->rc_counter++;
46630@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46631 *event = d->last_event;
46632 deb_rc("repeat key, event %x\n",
46633 *event);
46634- return 0;
46635+ goto out;
46636 }
46637 }
46638 deb_rc("repeated key (non repeatable)\n");
46639 }
46640- return 0;
46641+ goto out;
46642 }
46643
46644 /* hack to pass checksum on the custom field */
46645@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46646
46647 deb_rc("key: %*ph\n", 5, key);
46648 }
46649+out:
46650+ kfree(cmd);
46651+ kfree(key);
46652 return 0;
46653 }
46654
46655diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46656index c890fe4..f9b2ae6 100644
46657--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46658+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46659@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46660 fe_status_t *status)
46661 {
46662 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46663- struct dvbt_get_status_msg result;
46664- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46665+ struct dvbt_get_status_msg *result;
46666+ u8 *cmd;
46667 int ret;
46668
46669- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46670- sizeof(result), 0);
46671+ cmd = kmalloc(1, GFP_KERNEL);
46672+ if (cmd == NULL)
46673+ return -ENOMEM;
46674+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46675+ if (result == NULL) {
46676+ kfree(cmd);
46677+ return -ENOMEM;
46678+ }
46679+
46680+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46681+
46682+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46683+ sizeof(*result), 0);
46684 if (ret < 0)
46685- return ret;
46686+ goto out;
46687
46688 *status = 0;
46689
46690- if (0xffff - le16_to_cpu(result.gain) > 30)
46691+ if (0xffff - le16_to_cpu(result->gain) > 30)
46692 *status |= FE_HAS_SIGNAL;
46693- if (result.lock_bits & (1 << 6))
46694+ if (result->lock_bits & (1 << 6))
46695 *status |= FE_HAS_LOCK;
46696- if (result.lock_bits & (1 << 5))
46697+ if (result->lock_bits & (1 << 5))
46698 *status |= FE_HAS_SYNC;
46699- if (result.lock_bits & (1 << 4))
46700+ if (result->lock_bits & (1 << 4))
46701 *status |= FE_HAS_CARRIER;
46702- if (result.lock_bits & (1 << 1))
46703+ if (result->lock_bits & (1 << 1))
46704 *status |= FE_HAS_VITERBI;
46705
46706 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46707 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46708 *status &= ~FE_HAS_LOCK;
46709
46710- return 0;
46711+out:
46712+ kfree(cmd);
46713+ kfree(result);
46714+ return ret;
46715 }
46716
46717 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46718 {
46719 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46720- struct dvbt_get_status_msg status;
46721- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46722+ struct dvbt_get_status_msg *status;
46723+ char *cmd;
46724 int ret;
46725
46726- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46727- sizeof(status), 0);
46728+ cmd = kmalloc(1, GFP_KERNEL);
46729+ if (cmd == NULL)
46730+ return -ENOMEM;
46731+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46732+ if (status == NULL) {
46733+ kfree(cmd);
46734+ return -ENOMEM;
46735+ }
46736+
46737+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46738+
46739+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46740+ sizeof(*status), 0);
46741 if (ret < 0)
46742- return ret;
46743+ goto out;
46744
46745- *ber = le32_to_cpu(status.viterbi_error_rate);
46746+ *ber = le32_to_cpu(status->viterbi_error_rate);
46747+out:
46748+ kfree(cmd);
46749+ kfree(status);
46750 return 0;
46751 }
46752
46753 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46754 {
46755 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46756- struct dvbt_get_status_msg status;
46757- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46758+ struct dvbt_get_status_msg *status;
46759+ u8 *cmd;
46760 int ret;
46761
46762- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46763- sizeof(status), 0);
46764+ cmd = kmalloc(1, GFP_KERNEL);
46765+ if (cmd == NULL)
46766+ return -ENOMEM;
46767+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46768+ if (status == NULL) {
46769+ kfree(cmd);
46770+ return -ENOMEM;
46771+ }
46772+
46773+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46774+
46775+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46776+ sizeof(*status), 0);
46777 if (ret < 0) {
46778 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46779 ret);
46780- return ret;
46781+ goto out;
46782 }
46783- *unc = le32_to_cpu(status.uncorrected_block_count);
46784- return 0;
46785+ *unc = le32_to_cpu(status->uncorrected_block_count);
46786+
46787+out:
46788+ kfree(cmd);
46789+ kfree(status);
46790+ return ret;
46791 }
46792
46793 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46794 u16 *strength)
46795 {
46796 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46797- struct dvbt_get_status_msg status;
46798- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46799+ struct dvbt_get_status_msg *status;
46800+ char *cmd;
46801 int ret;
46802
46803- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46804- sizeof(status), 0);
46805+ cmd = kmalloc(1, GFP_KERNEL);
46806+ if (cmd == NULL)
46807+ return -ENOMEM;
46808+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46809+ if (status == NULL) {
46810+ kfree(cmd);
46811+ return -ENOMEM;
46812+ }
46813+
46814+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46815+
46816+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46817+ sizeof(*status), 0);
46818 if (ret < 0) {
46819 err("cinergyt2_fe_read_signal_strength() Failed!"
46820 " (Error=%d)\n", ret);
46821- return ret;
46822+ goto out;
46823 }
46824- *strength = (0xffff - le16_to_cpu(status.gain));
46825+ *strength = (0xffff - le16_to_cpu(status->gain));
46826+
46827+out:
46828+ kfree(cmd);
46829+ kfree(status);
46830 return 0;
46831 }
46832
46833 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46834 {
46835 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46836- struct dvbt_get_status_msg status;
46837- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46838+ struct dvbt_get_status_msg *status;
46839+ char *cmd;
46840 int ret;
46841
46842- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46843- sizeof(status), 0);
46844+ cmd = kmalloc(1, GFP_KERNEL);
46845+ if (cmd == NULL)
46846+ return -ENOMEM;
46847+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46848+ if (status == NULL) {
46849+ kfree(cmd);
46850+ return -ENOMEM;
46851+ }
46852+
46853+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46854+
46855+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46856+ sizeof(*status), 0);
46857 if (ret < 0) {
46858 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46859- return ret;
46860+ goto out;
46861 }
46862- *snr = (status.snr << 8) | status.snr;
46863- return 0;
46864+ *snr = (status->snr << 8) | status->snr;
46865+
46866+out:
46867+ kfree(cmd);
46868+ kfree(status);
46869+ return ret;
46870 }
46871
46872 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46873@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46874 {
46875 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46876 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46877- struct dvbt_set_parameters_msg param;
46878- char result[2];
46879+ struct dvbt_set_parameters_msg *param;
46880+ char *result;
46881 int err;
46882
46883- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46884- param.tps = cpu_to_le16(compute_tps(fep));
46885- param.freq = cpu_to_le32(fep->frequency / 1000);
46886- param.flags = 0;
46887+ result = kmalloc(2, GFP_KERNEL);
46888+ if (result == NULL)
46889+ return -ENOMEM;
46890+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46891+ if (param == NULL) {
46892+ kfree(result);
46893+ return -ENOMEM;
46894+ }
46895+
46896+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46897+ param->tps = cpu_to_le16(compute_tps(fep));
46898+ param->freq = cpu_to_le32(fep->frequency / 1000);
46899+ param->flags = 0;
46900
46901 switch (fep->bandwidth_hz) {
46902 default:
46903 case 8000000:
46904- param.bandwidth = 8;
46905+ param->bandwidth = 8;
46906 break;
46907 case 7000000:
46908- param.bandwidth = 7;
46909+ param->bandwidth = 7;
46910 break;
46911 case 6000000:
46912- param.bandwidth = 6;
46913+ param->bandwidth = 6;
46914 break;
46915 }
46916
46917 err = dvb_usb_generic_rw(state->d,
46918- (char *)&param, sizeof(param),
46919- result, sizeof(result), 0);
46920+ (char *)param, sizeof(*param),
46921+ result, 2, 0);
46922 if (err < 0)
46923 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46924
46925- return (err < 0) ? err : 0;
46926+ kfree(result);
46927+ kfree(param);
46928+ return err;
46929 }
46930
46931 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46932diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46933index 733a7ff..f8b52e3 100644
46934--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46935+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46936@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46937
46938 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46939 {
46940- struct hexline hx;
46941- u8 reset;
46942+ struct hexline *hx;
46943+ u8 *reset;
46944 int ret,pos=0;
46945
46946+ reset = kmalloc(1, GFP_KERNEL);
46947+ if (reset == NULL)
46948+ return -ENOMEM;
46949+
46950+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46951+ if (hx == NULL) {
46952+ kfree(reset);
46953+ return -ENOMEM;
46954+ }
46955+
46956 /* stop the CPU */
46957- reset = 1;
46958- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46959+ reset[0] = 1;
46960+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46961 err("could not stop the USB controller CPU.");
46962
46963- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46964- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46965- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46966+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46967+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46968+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46969
46970- if (ret != hx.len) {
46971+ if (ret != hx->len) {
46972 err("error while transferring firmware "
46973 "(transferred size: %d, block size: %d)",
46974- ret,hx.len);
46975+ ret,hx->len);
46976 ret = -EINVAL;
46977 break;
46978 }
46979 }
46980 if (ret < 0) {
46981 err("firmware download failed at %d with %d",pos,ret);
46982+ kfree(reset);
46983+ kfree(hx);
46984 return ret;
46985 }
46986
46987 if (ret == 0) {
46988 /* restart the CPU */
46989- reset = 0;
46990- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46991+ reset[0] = 0;
46992+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46993 err("could not restart the USB controller CPU.");
46994 ret = -EINVAL;
46995 }
46996 } else
46997 ret = -EIO;
46998
46999+ kfree(reset);
47000+ kfree(hx);
47001+
47002 return ret;
47003 }
47004 EXPORT_SYMBOL(usb_cypress_load_firmware);
47005diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
47006index 2add8c5..c33b854 100644
47007--- a/drivers/media/usb/dvb-usb/dw2102.c
47008+++ b/drivers/media/usb/dvb-usb/dw2102.c
47009@@ -118,7 +118,7 @@ struct su3000_state {
47010
47011 struct s6x0_state {
47012 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
47013-};
47014+} __no_const;
47015
47016 /* debug */
47017 static int dvb_usb_dw2102_debug;
47018diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47019index 6b0b8b6b..4038398 100644
47020--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47021+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47022@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47023 static int technisat_usb2_i2c_access(struct usb_device *udev,
47024 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47025 {
47026- u8 b[64];
47027- int ret, actual_length;
47028+ u8 *b = kmalloc(64, GFP_KERNEL);
47029+ int ret, actual_length, error = 0;
47030+
47031+ if (b == NULL)
47032+ return -ENOMEM;
47033
47034 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47035 debug_dump(tx, txlen, deb_i2c);
47036@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47037
47038 if (ret < 0) {
47039 err("i2c-error: out failed %02x = %d", device_addr, ret);
47040- return -ENODEV;
47041+ error = -ENODEV;
47042+ goto out;
47043 }
47044
47045 ret = usb_bulk_msg(udev,
47046@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47047 b, 64, &actual_length, 1000);
47048 if (ret < 0) {
47049 err("i2c-error: in failed %02x = %d", device_addr, ret);
47050- return -ENODEV;
47051+ error = -ENODEV;
47052+ goto out;
47053 }
47054
47055 if (b[0] != I2C_STATUS_OK) {
47056@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47057 /* handle tuner-i2c-nak */
47058 if (!(b[0] == I2C_STATUS_NAK &&
47059 device_addr == 0x60
47060- /* && device_is_technisat_usb2 */))
47061- return -ENODEV;
47062+ /* && device_is_technisat_usb2 */)) {
47063+ error = -ENODEV;
47064+ goto out;
47065+ }
47066 }
47067
47068 deb_i2c("status: %d, ", b[0]);
47069@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47070
47071 deb_i2c("\n");
47072
47073- return 0;
47074+out:
47075+ kfree(b);
47076+ return error;
47077 }
47078
47079 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47080@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47081 {
47082 int ret;
47083
47084- u8 led[8] = {
47085- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47086- 0
47087- };
47088+ u8 *led = kzalloc(8, GFP_KERNEL);
47089+
47090+ if (led == NULL)
47091+ return -ENOMEM;
47092
47093 if (disable_led_control && state != TECH_LED_OFF)
47094 return 0;
47095
47096+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47097+
47098 switch (state) {
47099 case TECH_LED_ON:
47100 led[1] = 0x82;
47101@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47102 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47103 USB_TYPE_VENDOR | USB_DIR_OUT,
47104 0, 0,
47105- led, sizeof(led), 500);
47106+ led, 8, 500);
47107
47108 mutex_unlock(&d->i2c_mutex);
47109+
47110+ kfree(led);
47111+
47112 return ret;
47113 }
47114
47115 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47116 {
47117 int ret;
47118- u8 b = 0;
47119+ u8 *b = kzalloc(1, GFP_KERNEL);
47120+
47121+ if (b == NULL)
47122+ return -ENOMEM;
47123
47124 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47125 return -EAGAIN;
47126@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47127 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47128 USB_TYPE_VENDOR | USB_DIR_OUT,
47129 (red << 8) | green, 0,
47130- &b, 1, 500);
47131+ b, 1, 500);
47132
47133 mutex_unlock(&d->i2c_mutex);
47134
47135+ kfree(b);
47136+
47137 return ret;
47138 }
47139
47140@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47141 struct dvb_usb_device_description **desc, int *cold)
47142 {
47143 int ret;
47144- u8 version[3];
47145+ u8 *version = kmalloc(3, GFP_KERNEL);
47146
47147 /* first select the interface */
47148 if (usb_set_interface(udev, 0, 1) != 0)
47149@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47150
47151 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47152
47153+ if (version == NULL)
47154+ return 0;
47155+
47156 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47157 GET_VERSION_INFO_VENDOR_REQUEST,
47158 USB_TYPE_VENDOR | USB_DIR_IN,
47159 0, 0,
47160- version, sizeof(version), 500);
47161+ version, 3, 500);
47162
47163 if (ret < 0)
47164 *cold = 1;
47165@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47166 *cold = 0;
47167 }
47168
47169+ kfree(version);
47170+
47171 return 0;
47172 }
47173
47174@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47175
47176 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47177 {
47178- u8 buf[62], *b;
47179+ u8 *buf, *b;
47180 int ret;
47181 struct ir_raw_event ev;
47182
47183+ buf = kmalloc(62, GFP_KERNEL);
47184+
47185+ if (buf == NULL)
47186+ return -ENOMEM;
47187+
47188 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47189 buf[1] = 0x08;
47190 buf[2] = 0x8f;
47191@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47192 GET_IR_DATA_VENDOR_REQUEST,
47193 USB_TYPE_VENDOR | USB_DIR_IN,
47194 0x8080, 0,
47195- buf, sizeof(buf), 500);
47196+ buf, 62, 500);
47197
47198 unlock:
47199 mutex_unlock(&d->i2c_mutex);
47200
47201- if (ret < 0)
47202+ if (ret < 0) {
47203+ kfree(buf);
47204 return ret;
47205+ }
47206
47207- if (ret == 1)
47208+ if (ret == 1) {
47209+ kfree(buf);
47210 return 0; /* no key pressed */
47211+ }
47212
47213 /* decoding */
47214 b = buf+1;
47215@@ -653,6 +686,8 @@ unlock:
47216
47217 ir_raw_event_handle(d->rc_dev);
47218
47219+ kfree(buf);
47220+
47221 return 1;
47222 }
47223
47224diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47225index cca6c2f..77b9a18 100644
47226--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47227+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47228@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
47229 __u32 reserved;
47230 };
47231
47232-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47233+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47234 enum v4l2_memory memory)
47235 {
47236 void __user *up_pln;
47237@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47238 return 0;
47239 }
47240
47241-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
47242+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
47243 enum v4l2_memory memory)
47244 {
47245 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
47246@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47247 * by passing a very big num_planes value */
47248 uplane = compat_alloc_user_space(num_planes *
47249 sizeof(struct v4l2_plane));
47250- kp->m.planes = uplane;
47251+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
47252
47253 while (--num_planes >= 0) {
47254 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47255@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47256 if (num_planes == 0)
47257 return 0;
47258
47259- uplane = kp->m.planes;
47260+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47261 if (get_user(p, &up->m.planes))
47262 return -EFAULT;
47263 uplane32 = compat_ptr(p);
47264@@ -562,7 +562,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47265 get_user(kp->flags, &up->flags) ||
47266 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47267 return -EFAULT;
47268- kp->base = compat_ptr(tmp);
47269+ kp->base = (void __force_kernel *)compat_ptr(tmp);
47270 return 0;
47271 }
47272
47273@@ -667,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47274 n * sizeof(struct v4l2_ext_control32)))
47275 return -EFAULT;
47276 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47277- kp->controls = kcontrols;
47278+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
47279 while (--n >= 0) {
47280 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
47281 return -EFAULT;
47282@@ -689,7 +689,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47283 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
47284 {
47285 struct v4l2_ext_control32 __user *ucontrols;
47286- struct v4l2_ext_control __user *kcontrols = kp->controls;
47287+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
47288 int n = kp->count;
47289 compat_caddr_t p;
47290
47291@@ -783,7 +783,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47292 put_user(kp->start_block, &up->start_block) ||
47293 put_user(kp->blocks, &up->blocks) ||
47294 put_user(tmp, &up->edid) ||
47295- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47296+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
47297 return -EFAULT;
47298 return 0;
47299 }
47300diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47301index 015f92a..59e311e 100644
47302--- a/drivers/media/v4l2-core/v4l2-device.c
47303+++ b/drivers/media/v4l2-core/v4l2-device.c
47304@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47305 EXPORT_SYMBOL_GPL(v4l2_device_put);
47306
47307 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47308- atomic_t *instance)
47309+ atomic_unchecked_t *instance)
47310 {
47311- int num = atomic_inc_return(instance) - 1;
47312+ int num = atomic_inc_return_unchecked(instance) - 1;
47313 int len = strlen(basename);
47314
47315 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47316diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47317index d15e167..337f374 100644
47318--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47319+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47320@@ -2142,7 +2142,8 @@ struct v4l2_ioctl_info {
47321 struct file *file, void *fh, void *p);
47322 } u;
47323 void (*debug)(const void *arg, bool write_only);
47324-};
47325+} __do_const;
47326+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47327
47328 /* This control needs a priority check */
47329 #define INFO_FL_PRIO (1 << 0)
47330@@ -2326,7 +2327,7 @@ static long __video_do_ioctl(struct file *file,
47331 struct video_device *vfd = video_devdata(file);
47332 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47333 bool write_only = false;
47334- struct v4l2_ioctl_info default_info;
47335+ v4l2_ioctl_info_no_const default_info;
47336 const struct v4l2_ioctl_info *info;
47337 void *fh = file->private_data;
47338 struct v4l2_fh *vfh = NULL;
47339@@ -2413,7 +2414,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47340 ret = -EINVAL;
47341 break;
47342 }
47343- *user_ptr = (void __user *)buf->m.planes;
47344+ *user_ptr = (void __force_user *)buf->m.planes;
47345 *kernel_ptr = (void **)&buf->m.planes;
47346 *array_size = sizeof(struct v4l2_plane) * buf->length;
47347 ret = 1;
47348@@ -2430,7 +2431,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47349 ret = -EINVAL;
47350 break;
47351 }
47352- *user_ptr = (void __user *)edid->edid;
47353+ *user_ptr = (void __force_user *)edid->edid;
47354 *kernel_ptr = (void **)&edid->edid;
47355 *array_size = edid->blocks * 128;
47356 ret = 1;
47357@@ -2448,7 +2449,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47358 ret = -EINVAL;
47359 break;
47360 }
47361- *user_ptr = (void __user *)ctrls->controls;
47362+ *user_ptr = (void __force_user *)ctrls->controls;
47363 *kernel_ptr = (void **)&ctrls->controls;
47364 *array_size = sizeof(struct v4l2_ext_control)
47365 * ctrls->count;
47366@@ -2549,7 +2550,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47367 }
47368
47369 if (has_array_args) {
47370- *kernel_ptr = (void __force *)user_ptr;
47371+ *kernel_ptr = (void __force_kernel *)user_ptr;
47372 if (copy_to_user(user_ptr, mbuf, array_size))
47373 err = -EFAULT;
47374 goto out_array_args;
47375diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47376index a896d94..a5d56b1 100644
47377--- a/drivers/message/fusion/mptbase.c
47378+++ b/drivers/message/fusion/mptbase.c
47379@@ -6752,8 +6752,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47380 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47381 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47382
47383+#ifdef CONFIG_GRKERNSEC_HIDESYM
47384+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47385+#else
47386 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47387 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47388+#endif
47389+
47390 /*
47391 * Rounding UP to nearest 4-kB boundary here...
47392 */
47393@@ -6766,7 +6771,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47394 ioc->facts.GlobalCredits);
47395
47396 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47397+#ifdef CONFIG_GRKERNSEC_HIDESYM
47398+ NULL, NULL);
47399+#else
47400 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47401+#endif
47402 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47403 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47404 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47405diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47406index 0707fa2..70ca794 100644
47407--- a/drivers/message/fusion/mptsas.c
47408+++ b/drivers/message/fusion/mptsas.c
47409@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47410 return 0;
47411 }
47412
47413+static inline void
47414+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47415+{
47416+ if (phy_info->port_details) {
47417+ phy_info->port_details->rphy = rphy;
47418+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47419+ ioc->name, rphy));
47420+ }
47421+
47422+ if (rphy) {
47423+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47424+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47425+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47426+ ioc->name, rphy, rphy->dev.release));
47427+ }
47428+}
47429+
47430 /* no mutex */
47431 static void
47432 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47433@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47434 return NULL;
47435 }
47436
47437-static inline void
47438-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47439-{
47440- if (phy_info->port_details) {
47441- phy_info->port_details->rphy = rphy;
47442- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47443- ioc->name, rphy));
47444- }
47445-
47446- if (rphy) {
47447- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47448- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47449- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47450- ioc->name, rphy, rphy->dev.release));
47451- }
47452-}
47453-
47454 static inline struct sas_port *
47455 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47456 {
47457diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
47458index b7d87cd..3fb36da 100644
47459--- a/drivers/message/i2o/i2o_proc.c
47460+++ b/drivers/message/i2o/i2o_proc.c
47461@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
47462 "Array Controller Device"
47463 };
47464
47465-static char *chtostr(char *tmp, u8 *chars, int n)
47466-{
47467- tmp[0] = 0;
47468- return strncat(tmp, (char *)chars, n);
47469-}
47470-
47471 static int i2o_report_query_status(struct seq_file *seq, int block_status,
47472 char *group)
47473 {
47474@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
47475 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
47476 {
47477 struct i2o_controller *c = (struct i2o_controller *)seq->private;
47478- static u32 work32[5];
47479- static u8 *work8 = (u8 *) work32;
47480- static u16 *work16 = (u16 *) work32;
47481+ u32 work32[5];
47482+ u8 *work8 = (u8 *) work32;
47483+ u16 *work16 = (u16 *) work32;
47484 int token;
47485 u32 hwcap;
47486
47487@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47488 } *result;
47489
47490 i2o_exec_execute_ddm_table ddm_table;
47491- char tmp[28 + 1];
47492
47493 result = kmalloc(sizeof(*result), GFP_KERNEL);
47494 if (!result)
47495@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
47496
47497 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
47498 seq_printf(seq, "%-#8x", ddm_table.module_id);
47499- seq_printf(seq, "%-29s",
47500- chtostr(tmp, ddm_table.module_name_version, 28));
47501+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
47502 seq_printf(seq, "%9d ", ddm_table.data_size);
47503 seq_printf(seq, "%8d", ddm_table.code_size);
47504
47505@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47506
47507 i2o_driver_result_table *result;
47508 i2o_driver_store_table *dst;
47509- char tmp[28 + 1];
47510
47511 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
47512 if (result == NULL)
47513@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
47514
47515 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
47516 seq_printf(seq, "%-#8x", dst->module_id);
47517- seq_printf(seq, "%-29s",
47518- chtostr(tmp, dst->module_name_version, 28));
47519- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
47520+ seq_printf(seq, "%-.28s", dst->module_name_version);
47521+ seq_printf(seq, "%-.8s", dst->date);
47522 seq_printf(seq, "%8d ", dst->module_size);
47523 seq_printf(seq, "%8d ", dst->mpb_size);
47524 seq_printf(seq, "0x%04x", dst->module_flags);
47525@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
47526 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47527 {
47528 struct i2o_device *d = (struct i2o_device *)seq->private;
47529- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47530+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
47531 // == (allow) 512d bytes (max)
47532- static u16 *work16 = (u16 *) work32;
47533+ u16 *work16 = (u16 *) work32;
47534 int token;
47535- char tmp[16 + 1];
47536
47537 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
47538
47539@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
47540 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
47541 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
47542 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
47543- seq_printf(seq, "Vendor info : %s\n",
47544- chtostr(tmp, (u8 *) (work32 + 2), 16));
47545- seq_printf(seq, "Product info : %s\n",
47546- chtostr(tmp, (u8 *) (work32 + 6), 16));
47547- seq_printf(seq, "Description : %s\n",
47548- chtostr(tmp, (u8 *) (work32 + 10), 16));
47549- seq_printf(seq, "Product rev. : %s\n",
47550- chtostr(tmp, (u8 *) (work32 + 14), 8));
47551+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
47552+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
47553+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
47554+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
47555
47556 seq_printf(seq, "Serial number : ");
47557 print_serial_number(seq, (u8 *) (work32 + 16),
47558@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47559 u8 pad[256]; // allow up to 256 byte (max) serial number
47560 } result;
47561
47562- char tmp[24 + 1];
47563-
47564 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
47565
47566 if (token < 0) {
47567@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
47568 }
47569
47570 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
47571- seq_printf(seq, "Module name : %s\n",
47572- chtostr(tmp, result.module_name, 24));
47573- seq_printf(seq, "Module revision : %s\n",
47574- chtostr(tmp, result.module_rev, 8));
47575+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
47576+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
47577
47578 seq_printf(seq, "Serial number : ");
47579 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
47580@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47581 u8 instance_number[4];
47582 } result;
47583
47584- char tmp[64 + 1];
47585-
47586 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
47587
47588 if (token < 0) {
47589@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47590 return 0;
47591 }
47592
47593- seq_printf(seq, "Device name : %s\n",
47594- chtostr(tmp, result.device_name, 64));
47595- seq_printf(seq, "Service name : %s\n",
47596- chtostr(tmp, result.service_name, 64));
47597- seq_printf(seq, "Physical name : %s\n",
47598- chtostr(tmp, result.physical_location, 64));
47599- seq_printf(seq, "Instance number : %s\n",
47600- chtostr(tmp, result.instance_number, 4));
47601+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
47602+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
47603+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
47604+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
47605
47606 return 0;
47607 }
47608@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
47609 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
47610 {
47611 struct i2o_device *d = (struct i2o_device *)seq->private;
47612- static u32 work32[12];
47613- static u16 *work16 = (u16 *) work32;
47614- static u8 *work8 = (u8 *) work32;
47615+ u32 work32[12];
47616+ u16 *work16 = (u16 *) work32;
47617+ u8 *work8 = (u8 *) work32;
47618 int token;
47619
47620 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
47621diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
47622index 92752fb..a7494f6 100644
47623--- a/drivers/message/i2o/iop.c
47624+++ b/drivers/message/i2o/iop.c
47625@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
47626
47627 spin_lock_irqsave(&c->context_list_lock, flags);
47628
47629- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
47630- atomic_inc(&c->context_list_counter);
47631+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
47632+ atomic_inc_unchecked(&c->context_list_counter);
47633
47634- entry->context = atomic_read(&c->context_list_counter);
47635+ entry->context = atomic_read_unchecked(&c->context_list_counter);
47636
47637 list_add(&entry->list, &c->context_list);
47638
47639@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
47640
47641 #if BITS_PER_LONG == 64
47642 spin_lock_init(&c->context_list_lock);
47643- atomic_set(&c->context_list_counter, 0);
47644+ atomic_set_unchecked(&c->context_list_counter, 0);
47645 INIT_LIST_HEAD(&c->context_list);
47646 #endif
47647
47648diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47649index b2c7e3b..85aa4764 100644
47650--- a/drivers/mfd/ab8500-debugfs.c
47651+++ b/drivers/mfd/ab8500-debugfs.c
47652@@ -100,7 +100,7 @@ static int irq_last;
47653 static u32 *irq_count;
47654 static int num_irqs;
47655
47656-static struct device_attribute **dev_attr;
47657+static device_attribute_no_const **dev_attr;
47658 static char **event_name;
47659
47660 static u8 avg_sample = SAMPLE_16;
47661diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47662index ecbe78e..b2ca870 100644
47663--- a/drivers/mfd/max8925-i2c.c
47664+++ b/drivers/mfd/max8925-i2c.c
47665@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47666 const struct i2c_device_id *id)
47667 {
47668 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47669- static struct max8925_chip *chip;
47670+ struct max8925_chip *chip;
47671 struct device_node *node = client->dev.of_node;
47672
47673 if (node && !pdata) {
47674diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47675index f243e75..322176c 100644
47676--- a/drivers/mfd/tps65910.c
47677+++ b/drivers/mfd/tps65910.c
47678@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47679 struct tps65910_platform_data *pdata)
47680 {
47681 int ret = 0;
47682- static struct regmap_irq_chip *tps6591x_irqs_chip;
47683+ struct regmap_irq_chip *tps6591x_irqs_chip;
47684
47685 if (!irq) {
47686 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47687diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47688index b1dabba..24a88f2 100644
47689--- a/drivers/mfd/twl4030-irq.c
47690+++ b/drivers/mfd/twl4030-irq.c
47691@@ -34,6 +34,7 @@
47692 #include <linux/of.h>
47693 #include <linux/irqdomain.h>
47694 #include <linux/i2c/twl.h>
47695+#include <asm/pgtable.h>
47696
47697 #include "twl-core.h"
47698
47699@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47700 * Install an irq handler for each of the SIH modules;
47701 * clone dummy irq_chip since PIH can't *do* anything
47702 */
47703- twl4030_irq_chip = dummy_irq_chip;
47704- twl4030_irq_chip.name = "twl4030";
47705+ pax_open_kernel();
47706+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47707+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47708
47709- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47710+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47711+ pax_close_kernel();
47712
47713 for (i = irq_base; i < irq_end; i++) {
47714 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47715diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47716index 464419b..64bae8d 100644
47717--- a/drivers/misc/c2port/core.c
47718+++ b/drivers/misc/c2port/core.c
47719@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47720 goto error_idr_alloc;
47721 c2dev->id = ret;
47722
47723- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47724+ pax_open_kernel();
47725+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47726+ pax_close_kernel();
47727
47728 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47729 "c2port%d", c2dev->id);
47730diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47731index 3f2b625..945e179 100644
47732--- a/drivers/misc/eeprom/sunxi_sid.c
47733+++ b/drivers/misc/eeprom/sunxi_sid.c
47734@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47735
47736 platform_set_drvdata(pdev, sid_data);
47737
47738- sid_bin_attr.size = sid_data->keysize;
47739+ pax_open_kernel();
47740+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47741+ pax_close_kernel();
47742 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47743 return -ENODEV;
47744
47745diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47746index 36f5d52..32311c3 100644
47747--- a/drivers/misc/kgdbts.c
47748+++ b/drivers/misc/kgdbts.c
47749@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47750 char before[BREAK_INSTR_SIZE];
47751 char after[BREAK_INSTR_SIZE];
47752
47753- probe_kernel_read(before, (char *)kgdbts_break_test,
47754+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47755 BREAK_INSTR_SIZE);
47756 init_simple_test();
47757 ts.tst = plant_and_detach_test;
47758@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47759 /* Activate test with initial breakpoint */
47760 if (!is_early)
47761 kgdb_breakpoint();
47762- probe_kernel_read(after, (char *)kgdbts_break_test,
47763+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47764 BREAK_INSTR_SIZE);
47765 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47766 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47767diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47768index 3ef4627..8d00486 100644
47769--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47770+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47771@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47772 * the lid is closed. This leads to interrupts as soon as a little move
47773 * is done.
47774 */
47775- atomic_inc(&lis3->count);
47776+ atomic_inc_unchecked(&lis3->count);
47777
47778 wake_up_interruptible(&lis3->misc_wait);
47779 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47780@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47781 if (lis3->pm_dev)
47782 pm_runtime_get_sync(lis3->pm_dev);
47783
47784- atomic_set(&lis3->count, 0);
47785+ atomic_set_unchecked(&lis3->count, 0);
47786 return 0;
47787 }
47788
47789@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47790 add_wait_queue(&lis3->misc_wait, &wait);
47791 while (true) {
47792 set_current_state(TASK_INTERRUPTIBLE);
47793- data = atomic_xchg(&lis3->count, 0);
47794+ data = atomic_xchg_unchecked(&lis3->count, 0);
47795 if (data)
47796 break;
47797
47798@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47799 struct lis3lv02d, miscdev);
47800
47801 poll_wait(file, &lis3->misc_wait, wait);
47802- if (atomic_read(&lis3->count))
47803+ if (atomic_read_unchecked(&lis3->count))
47804 return POLLIN | POLLRDNORM;
47805 return 0;
47806 }
47807diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47808index c439c82..1f20f57 100644
47809--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47810+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47811@@ -297,7 +297,7 @@ struct lis3lv02d {
47812 struct input_polled_dev *idev; /* input device */
47813 struct platform_device *pdev; /* platform device */
47814 struct regulator_bulk_data regulators[2];
47815- atomic_t count; /* interrupt count after last read */
47816+ atomic_unchecked_t count; /* interrupt count after last read */
47817 union axis_conversion ac; /* hw -> logical axis */
47818 int mapped_btns[3];
47819
47820diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47821index 2f30bad..c4c13d0 100644
47822--- a/drivers/misc/sgi-gru/gruhandles.c
47823+++ b/drivers/misc/sgi-gru/gruhandles.c
47824@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47825 unsigned long nsec;
47826
47827 nsec = CLKS2NSEC(clks);
47828- atomic_long_inc(&mcs_op_statistics[op].count);
47829- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47830+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47831+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47832 if (mcs_op_statistics[op].max < nsec)
47833 mcs_op_statistics[op].max = nsec;
47834 }
47835diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47836index 4f76359..cdfcb2e 100644
47837--- a/drivers/misc/sgi-gru/gruprocfs.c
47838+++ b/drivers/misc/sgi-gru/gruprocfs.c
47839@@ -32,9 +32,9 @@
47840
47841 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47842
47843-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47844+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47845 {
47846- unsigned long val = atomic_long_read(v);
47847+ unsigned long val = atomic_long_read_unchecked(v);
47848
47849 seq_printf(s, "%16lu %s\n", val, id);
47850 }
47851@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47852
47853 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47854 for (op = 0; op < mcsop_last; op++) {
47855- count = atomic_long_read(&mcs_op_statistics[op].count);
47856- total = atomic_long_read(&mcs_op_statistics[op].total);
47857+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47858+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47859 max = mcs_op_statistics[op].max;
47860 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47861 count ? total / count : 0, max);
47862diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47863index 5c3ce24..4915ccb 100644
47864--- a/drivers/misc/sgi-gru/grutables.h
47865+++ b/drivers/misc/sgi-gru/grutables.h
47866@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47867 * GRU statistics.
47868 */
47869 struct gru_stats_s {
47870- atomic_long_t vdata_alloc;
47871- atomic_long_t vdata_free;
47872- atomic_long_t gts_alloc;
47873- atomic_long_t gts_free;
47874- atomic_long_t gms_alloc;
47875- atomic_long_t gms_free;
47876- atomic_long_t gts_double_allocate;
47877- atomic_long_t assign_context;
47878- atomic_long_t assign_context_failed;
47879- atomic_long_t free_context;
47880- atomic_long_t load_user_context;
47881- atomic_long_t load_kernel_context;
47882- atomic_long_t lock_kernel_context;
47883- atomic_long_t unlock_kernel_context;
47884- atomic_long_t steal_user_context;
47885- atomic_long_t steal_kernel_context;
47886- atomic_long_t steal_context_failed;
47887- atomic_long_t nopfn;
47888- atomic_long_t asid_new;
47889- atomic_long_t asid_next;
47890- atomic_long_t asid_wrap;
47891- atomic_long_t asid_reuse;
47892- atomic_long_t intr;
47893- atomic_long_t intr_cbr;
47894- atomic_long_t intr_tfh;
47895- atomic_long_t intr_spurious;
47896- atomic_long_t intr_mm_lock_failed;
47897- atomic_long_t call_os;
47898- atomic_long_t call_os_wait_queue;
47899- atomic_long_t user_flush_tlb;
47900- atomic_long_t user_unload_context;
47901- atomic_long_t user_exception;
47902- atomic_long_t set_context_option;
47903- atomic_long_t check_context_retarget_intr;
47904- atomic_long_t check_context_unload;
47905- atomic_long_t tlb_dropin;
47906- atomic_long_t tlb_preload_page;
47907- atomic_long_t tlb_dropin_fail_no_asid;
47908- atomic_long_t tlb_dropin_fail_upm;
47909- atomic_long_t tlb_dropin_fail_invalid;
47910- atomic_long_t tlb_dropin_fail_range_active;
47911- atomic_long_t tlb_dropin_fail_idle;
47912- atomic_long_t tlb_dropin_fail_fmm;
47913- atomic_long_t tlb_dropin_fail_no_exception;
47914- atomic_long_t tfh_stale_on_fault;
47915- atomic_long_t mmu_invalidate_range;
47916- atomic_long_t mmu_invalidate_page;
47917- atomic_long_t flush_tlb;
47918- atomic_long_t flush_tlb_gru;
47919- atomic_long_t flush_tlb_gru_tgh;
47920- atomic_long_t flush_tlb_gru_zero_asid;
47921+ atomic_long_unchecked_t vdata_alloc;
47922+ atomic_long_unchecked_t vdata_free;
47923+ atomic_long_unchecked_t gts_alloc;
47924+ atomic_long_unchecked_t gts_free;
47925+ atomic_long_unchecked_t gms_alloc;
47926+ atomic_long_unchecked_t gms_free;
47927+ atomic_long_unchecked_t gts_double_allocate;
47928+ atomic_long_unchecked_t assign_context;
47929+ atomic_long_unchecked_t assign_context_failed;
47930+ atomic_long_unchecked_t free_context;
47931+ atomic_long_unchecked_t load_user_context;
47932+ atomic_long_unchecked_t load_kernel_context;
47933+ atomic_long_unchecked_t lock_kernel_context;
47934+ atomic_long_unchecked_t unlock_kernel_context;
47935+ atomic_long_unchecked_t steal_user_context;
47936+ atomic_long_unchecked_t steal_kernel_context;
47937+ atomic_long_unchecked_t steal_context_failed;
47938+ atomic_long_unchecked_t nopfn;
47939+ atomic_long_unchecked_t asid_new;
47940+ atomic_long_unchecked_t asid_next;
47941+ atomic_long_unchecked_t asid_wrap;
47942+ atomic_long_unchecked_t asid_reuse;
47943+ atomic_long_unchecked_t intr;
47944+ atomic_long_unchecked_t intr_cbr;
47945+ atomic_long_unchecked_t intr_tfh;
47946+ atomic_long_unchecked_t intr_spurious;
47947+ atomic_long_unchecked_t intr_mm_lock_failed;
47948+ atomic_long_unchecked_t call_os;
47949+ atomic_long_unchecked_t call_os_wait_queue;
47950+ atomic_long_unchecked_t user_flush_tlb;
47951+ atomic_long_unchecked_t user_unload_context;
47952+ atomic_long_unchecked_t user_exception;
47953+ atomic_long_unchecked_t set_context_option;
47954+ atomic_long_unchecked_t check_context_retarget_intr;
47955+ atomic_long_unchecked_t check_context_unload;
47956+ atomic_long_unchecked_t tlb_dropin;
47957+ atomic_long_unchecked_t tlb_preload_page;
47958+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47959+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47960+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47961+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47962+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47963+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47964+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47965+ atomic_long_unchecked_t tfh_stale_on_fault;
47966+ atomic_long_unchecked_t mmu_invalidate_range;
47967+ atomic_long_unchecked_t mmu_invalidate_page;
47968+ atomic_long_unchecked_t flush_tlb;
47969+ atomic_long_unchecked_t flush_tlb_gru;
47970+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47971+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47972
47973- atomic_long_t copy_gpa;
47974- atomic_long_t read_gpa;
47975+ atomic_long_unchecked_t copy_gpa;
47976+ atomic_long_unchecked_t read_gpa;
47977
47978- atomic_long_t mesq_receive;
47979- atomic_long_t mesq_receive_none;
47980- atomic_long_t mesq_send;
47981- atomic_long_t mesq_send_failed;
47982- atomic_long_t mesq_noop;
47983- atomic_long_t mesq_send_unexpected_error;
47984- atomic_long_t mesq_send_lb_overflow;
47985- atomic_long_t mesq_send_qlimit_reached;
47986- atomic_long_t mesq_send_amo_nacked;
47987- atomic_long_t mesq_send_put_nacked;
47988- atomic_long_t mesq_page_overflow;
47989- atomic_long_t mesq_qf_locked;
47990- atomic_long_t mesq_qf_noop_not_full;
47991- atomic_long_t mesq_qf_switch_head_failed;
47992- atomic_long_t mesq_qf_unexpected_error;
47993- atomic_long_t mesq_noop_unexpected_error;
47994- atomic_long_t mesq_noop_lb_overflow;
47995- atomic_long_t mesq_noop_qlimit_reached;
47996- atomic_long_t mesq_noop_amo_nacked;
47997- atomic_long_t mesq_noop_put_nacked;
47998- atomic_long_t mesq_noop_page_overflow;
47999+ atomic_long_unchecked_t mesq_receive;
48000+ atomic_long_unchecked_t mesq_receive_none;
48001+ atomic_long_unchecked_t mesq_send;
48002+ atomic_long_unchecked_t mesq_send_failed;
48003+ atomic_long_unchecked_t mesq_noop;
48004+ atomic_long_unchecked_t mesq_send_unexpected_error;
48005+ atomic_long_unchecked_t mesq_send_lb_overflow;
48006+ atomic_long_unchecked_t mesq_send_qlimit_reached;
48007+ atomic_long_unchecked_t mesq_send_amo_nacked;
48008+ atomic_long_unchecked_t mesq_send_put_nacked;
48009+ atomic_long_unchecked_t mesq_page_overflow;
48010+ atomic_long_unchecked_t mesq_qf_locked;
48011+ atomic_long_unchecked_t mesq_qf_noop_not_full;
48012+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
48013+ atomic_long_unchecked_t mesq_qf_unexpected_error;
48014+ atomic_long_unchecked_t mesq_noop_unexpected_error;
48015+ atomic_long_unchecked_t mesq_noop_lb_overflow;
48016+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
48017+ atomic_long_unchecked_t mesq_noop_amo_nacked;
48018+ atomic_long_unchecked_t mesq_noop_put_nacked;
48019+ atomic_long_unchecked_t mesq_noop_page_overflow;
48020
48021 };
48022
48023@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
48024 tghop_invalidate, mcsop_last};
48025
48026 struct mcs_op_statistic {
48027- atomic_long_t count;
48028- atomic_long_t total;
48029+ atomic_long_unchecked_t count;
48030+ atomic_long_unchecked_t total;
48031 unsigned long max;
48032 };
48033
48034@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
48035
48036 #define STAT(id) do { \
48037 if (gru_options & OPT_STATS) \
48038- atomic_long_inc(&gru_stats.id); \
48039+ atomic_long_inc_unchecked(&gru_stats.id); \
48040 } while (0)
48041
48042 #ifdef CONFIG_SGI_GRU_DEBUG
48043diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
48044index c862cd4..0d176fe 100644
48045--- a/drivers/misc/sgi-xp/xp.h
48046+++ b/drivers/misc/sgi-xp/xp.h
48047@@ -288,7 +288,7 @@ struct xpc_interface {
48048 xpc_notify_func, void *);
48049 void (*received) (short, int, void *);
48050 enum xp_retval (*partid_to_nasids) (short, void *);
48051-};
48052+} __no_const;
48053
48054 extern struct xpc_interface xpc_interface;
48055
48056diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
48057index 01be66d..e3a0c7e 100644
48058--- a/drivers/misc/sgi-xp/xp_main.c
48059+++ b/drivers/misc/sgi-xp/xp_main.c
48060@@ -78,13 +78,13 @@ xpc_notloaded(void)
48061 }
48062
48063 struct xpc_interface xpc_interface = {
48064- (void (*)(int))xpc_notloaded,
48065- (void (*)(int))xpc_notloaded,
48066- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48067- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48068+ .connect = (void (*)(int))xpc_notloaded,
48069+ .disconnect = (void (*)(int))xpc_notloaded,
48070+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
48071+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
48072 void *))xpc_notloaded,
48073- (void (*)(short, int, void *))xpc_notloaded,
48074- (enum xp_retval(*)(short, void *))xpc_notloaded
48075+ .received = (void (*)(short, int, void *))xpc_notloaded,
48076+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
48077 };
48078 EXPORT_SYMBOL_GPL(xpc_interface);
48079
48080diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
48081index b94d5f7..7f494c5 100644
48082--- a/drivers/misc/sgi-xp/xpc.h
48083+++ b/drivers/misc/sgi-xp/xpc.h
48084@@ -835,6 +835,7 @@ struct xpc_arch_operations {
48085 void (*received_payload) (struct xpc_channel *, void *);
48086 void (*notify_senders_of_disconnect) (struct xpc_channel *);
48087 };
48088+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
48089
48090 /* struct xpc_partition act_state values (for XPC HB) */
48091
48092@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
48093 /* found in xpc_main.c */
48094 extern struct device *xpc_part;
48095 extern struct device *xpc_chan;
48096-extern struct xpc_arch_operations xpc_arch_ops;
48097+extern xpc_arch_operations_no_const xpc_arch_ops;
48098 extern int xpc_disengage_timelimit;
48099 extern int xpc_disengage_timedout;
48100 extern int xpc_activate_IRQ_rcvd;
48101diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
48102index 82dc574..8539ab2 100644
48103--- a/drivers/misc/sgi-xp/xpc_main.c
48104+++ b/drivers/misc/sgi-xp/xpc_main.c
48105@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
48106 .notifier_call = xpc_system_die,
48107 };
48108
48109-struct xpc_arch_operations xpc_arch_ops;
48110+xpc_arch_operations_no_const xpc_arch_ops;
48111
48112 /*
48113 * Timer function to enforce the timelimit on the partition disengage.
48114@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
48115
48116 if (((die_args->trapnr == X86_TRAP_MF) ||
48117 (die_args->trapnr == X86_TRAP_XF)) &&
48118- !user_mode_vm(die_args->regs))
48119+ !user_mode(die_args->regs))
48120 xpc_die_deactivate();
48121
48122 break;
48123diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
48124index ede41f0..744fbd9 100644
48125--- a/drivers/mmc/card/block.c
48126+++ b/drivers/mmc/card/block.c
48127@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
48128 if (idata->ic.postsleep_min_us)
48129 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
48130
48131- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
48132+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
48133 err = -EFAULT;
48134 goto cmd_rel_host;
48135 }
48136diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
48137index f51b5ba..86614a7 100644
48138--- a/drivers/mmc/core/mmc_ops.c
48139+++ b/drivers/mmc/core/mmc_ops.c
48140@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
48141 void *data_buf;
48142 int is_on_stack;
48143
48144- is_on_stack = object_is_on_stack(buf);
48145+ is_on_stack = object_starts_on_stack(buf);
48146 if (is_on_stack) {
48147 /*
48148 * dma onto stack is unsafe/nonportable, but callers to this
48149diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
48150index 08fd956..370487a 100644
48151--- a/drivers/mmc/host/dw_mmc.h
48152+++ b/drivers/mmc/host/dw_mmc.h
48153@@ -262,5 +262,5 @@ struct dw_mci_drv_data {
48154 int (*parse_dt)(struct dw_mci *host);
48155 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
48156 struct dw_mci_tuning_data *tuning_data);
48157-};
48158+} __do_const;
48159 #endif /* _DW_MMC_H_ */
48160diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
48161index e4d4707..28262a3 100644
48162--- a/drivers/mmc/host/mmci.c
48163+++ b/drivers/mmc/host/mmci.c
48164@@ -1612,7 +1612,9 @@ static int mmci_probe(struct amba_device *dev,
48165 mmc->caps |= MMC_CAP_CMD23;
48166
48167 if (variant->busy_detect) {
48168- mmci_ops.card_busy = mmci_card_busy;
48169+ pax_open_kernel();
48170+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
48171+ pax_close_kernel();
48172 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
48173 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
48174 mmc->max_busy_timeout = 0;
48175diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48176index ccec0e3..199f9ce 100644
48177--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48178+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48179@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48180 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48181 }
48182
48183- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48184- sdhci_esdhc_ops.platform_execute_tuning =
48185+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48186+ pax_open_kernel();
48187+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48188 esdhc_executing_tuning;
48189+ pax_close_kernel();
48190+ }
48191
48192 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48193 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48194diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48195index fa5954a..56840e5 100644
48196--- a/drivers/mmc/host/sdhci-s3c.c
48197+++ b/drivers/mmc/host/sdhci-s3c.c
48198@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48199 * we can use overriding functions instead of default.
48200 */
48201 if (sc->no_divider) {
48202- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48203- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48204- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48205+ pax_open_kernel();
48206+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48207+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48208+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48209+ pax_close_kernel();
48210 }
48211
48212 /* It supports additional host capabilities if needed */
48213diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48214index 423666b..81ff5eb 100644
48215--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48216+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48217@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48218 size_t totlen = 0, thislen;
48219 int ret = 0;
48220 size_t buflen = 0;
48221- static char *buffer;
48222+ char *buffer;
48223
48224 if (!ECCBUF_SIZE) {
48225 /* We should fall back to a general writev implementation.
48226diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48227index 0b071a3..8ec3d5b 100644
48228--- a/drivers/mtd/nand/denali.c
48229+++ b/drivers/mtd/nand/denali.c
48230@@ -24,6 +24,7 @@
48231 #include <linux/slab.h>
48232 #include <linux/mtd/mtd.h>
48233 #include <linux/module.h>
48234+#include <linux/slab.h>
48235
48236 #include "denali.h"
48237
48238diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48239index 959cb9b..8520fe5 100644
48240--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48241+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48242@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48243
48244 /* first try to map the upper buffer directly */
48245 if (virt_addr_valid(this->upper_buf) &&
48246- !object_is_on_stack(this->upper_buf)) {
48247+ !object_starts_on_stack(this->upper_buf)) {
48248 sg_init_one(sgl, this->upper_buf, this->upper_len);
48249 ret = dma_map_sg(this->dev, sgl, 1, dr);
48250 if (ret == 0)
48251diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48252index 51b9d6a..52af9a7 100644
48253--- a/drivers/mtd/nftlmount.c
48254+++ b/drivers/mtd/nftlmount.c
48255@@ -24,6 +24,7 @@
48256 #include <asm/errno.h>
48257 #include <linux/delay.h>
48258 #include <linux/slab.h>
48259+#include <linux/sched.h>
48260 #include <linux/mtd/mtd.h>
48261 #include <linux/mtd/nand.h>
48262 #include <linux/mtd/nftl.h>
48263diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48264index cf49c22..971b133 100644
48265--- a/drivers/mtd/sm_ftl.c
48266+++ b/drivers/mtd/sm_ftl.c
48267@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48268 #define SM_CIS_VENDOR_OFFSET 0x59
48269 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48270 {
48271- struct attribute_group *attr_group;
48272+ attribute_group_no_const *attr_group;
48273 struct attribute **attributes;
48274 struct sm_sysfs_attribute *vendor_attribute;
48275 char *vendor;
48276diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48277index d163e11..f517018 100644
48278--- a/drivers/net/bonding/bond_netlink.c
48279+++ b/drivers/net/bonding/bond_netlink.c
48280@@ -548,7 +548,7 @@ nla_put_failure:
48281 return -EMSGSIZE;
48282 }
48283
48284-struct rtnl_link_ops bond_link_ops __read_mostly = {
48285+struct rtnl_link_ops bond_link_ops = {
48286 .kind = "bond",
48287 .priv_size = sizeof(struct bonding),
48288 .setup = bond_setup,
48289diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48290index 4168822..f38eeddf 100644
48291--- a/drivers/net/can/Kconfig
48292+++ b/drivers/net/can/Kconfig
48293@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48294
48295 config CAN_FLEXCAN
48296 tristate "Support for Freescale FLEXCAN based chips"
48297- depends on ARM || PPC
48298+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48299 ---help---
48300 Say Y here if you want to support for Freescale FlexCAN.
48301
48302diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48303index 1d162cc..b546a75 100644
48304--- a/drivers/net/ethernet/8390/ax88796.c
48305+++ b/drivers/net/ethernet/8390/ax88796.c
48306@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48307 if (ax->plat->reg_offsets)
48308 ei_local->reg_offset = ax->plat->reg_offsets;
48309 else {
48310+ resource_size_t _mem_size = mem_size;
48311+ do_div(_mem_size, 0x18);
48312 ei_local->reg_offset = ax->reg_offsets;
48313 for (ret = 0; ret < 0x18; ret++)
48314- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48315+ ax->reg_offsets[ret] = _mem_size * ret;
48316 }
48317
48318 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48319diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48320index 7330681..7e9e463 100644
48321--- a/drivers/net/ethernet/altera/altera_tse_main.c
48322+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48323@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
48324 return 0;
48325 }
48326
48327-static struct net_device_ops altera_tse_netdev_ops = {
48328+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48329 .ndo_open = tse_open,
48330 .ndo_stop = tse_shutdown,
48331 .ndo_start_xmit = tse_start_xmit,
48332@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48333 ndev->netdev_ops = &altera_tse_netdev_ops;
48334 altera_tse_set_ethtool_ops(ndev);
48335
48336+ pax_open_kernel();
48337 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48338
48339 if (priv->hash_filter)
48340 altera_tse_netdev_ops.ndo_set_rx_mode =
48341 tse_set_rx_mode_hashfilter;
48342+ pax_close_kernel();
48343
48344 /* Scatter/gather IO is not supported,
48345 * so it is turned off
48346diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48347index cc25a3a..c8d72d3 100644
48348--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48349+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48350@@ -1083,14 +1083,14 @@ do { \
48351 * operations, everything works on mask values.
48352 */
48353 #define XMDIO_READ(_pdata, _mmd, _reg) \
48354- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48355+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48356 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48357
48358 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48359 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48360
48361 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48362- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48363+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48364 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48365
48366 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48367diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48368index 7d6a49b..e6d403b 100644
48369--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48370+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48371@@ -188,7 +188,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48372
48373 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48374
48375- pdata->hw_if.config_dcb_tc(pdata);
48376+ pdata->hw_if->config_dcb_tc(pdata);
48377
48378 return 0;
48379 }
48380@@ -227,7 +227,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48381
48382 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48383
48384- pdata->hw_if.config_dcb_pfc(pdata);
48385+ pdata->hw_if->config_dcb_pfc(pdata);
48386
48387 return 0;
48388 }
48389diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48390index 1c5d62e..8e14d54 100644
48391--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48392+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48393@@ -236,7 +236,7 @@ err_ring:
48394
48395 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48396 {
48397- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48398+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48399 struct xgbe_channel *channel;
48400 struct xgbe_ring *ring;
48401 struct xgbe_ring_data *rdata;
48402@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48403
48404 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48405 {
48406- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48407+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48408 struct xgbe_channel *channel;
48409 struct xgbe_ring *ring;
48410 struct xgbe_ring_desc *rdesc;
48411@@ -506,7 +506,7 @@ err_out:
48412 static void xgbe_realloc_skb(struct xgbe_channel *channel)
48413 {
48414 struct xgbe_prv_data *pdata = channel->pdata;
48415- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48416+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48417 struct xgbe_ring *ring = channel->rx_ring;
48418 struct xgbe_ring_data *rdata;
48419 struct sk_buff *skb = NULL;
48420@@ -550,17 +550,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
48421 DBGPR("<--xgbe_realloc_skb\n");
48422 }
48423
48424-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48425-{
48426- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48427-
48428- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48429- desc_if->free_ring_resources = xgbe_free_ring_resources;
48430- desc_if->map_tx_skb = xgbe_map_tx_skb;
48431- desc_if->realloc_skb = xgbe_realloc_skb;
48432- desc_if->unmap_skb = xgbe_unmap_skb;
48433- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48434- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48435-
48436- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48437-}
48438+const struct xgbe_desc_if default_xgbe_desc_if = {
48439+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48440+ .free_ring_resources = xgbe_free_ring_resources,
48441+ .map_tx_skb = xgbe_map_tx_skb,
48442+ .realloc_skb = xgbe_realloc_skb,
48443+ .unmap_skb = xgbe_unmap_skb,
48444+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48445+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48446+};
48447diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48448index ea27383..faa8936 100644
48449--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48450+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48451@@ -2463,7 +2463,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48452
48453 static int xgbe_init(struct xgbe_prv_data *pdata)
48454 {
48455- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48456+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48457 int ret;
48458
48459 DBGPR("-->xgbe_init\n");
48460@@ -2525,101 +2525,96 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48461 return 0;
48462 }
48463
48464-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48465-{
48466- DBGPR("-->xgbe_init_function_ptrs\n");
48467-
48468- hw_if->tx_complete = xgbe_tx_complete;
48469-
48470- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48471- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48472- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48473- hw_if->set_mac_address = xgbe_set_mac_address;
48474-
48475- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48476- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48477-
48478- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48479- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48480- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48481- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48482- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48483-
48484- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48485- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48486-
48487- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48488- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48489- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48490-
48491- hw_if->enable_tx = xgbe_enable_tx;
48492- hw_if->disable_tx = xgbe_disable_tx;
48493- hw_if->enable_rx = xgbe_enable_rx;
48494- hw_if->disable_rx = xgbe_disable_rx;
48495-
48496- hw_if->powerup_tx = xgbe_powerup_tx;
48497- hw_if->powerdown_tx = xgbe_powerdown_tx;
48498- hw_if->powerup_rx = xgbe_powerup_rx;
48499- hw_if->powerdown_rx = xgbe_powerdown_rx;
48500-
48501- hw_if->pre_xmit = xgbe_pre_xmit;
48502- hw_if->dev_read = xgbe_dev_read;
48503- hw_if->enable_int = xgbe_enable_int;
48504- hw_if->disable_int = xgbe_disable_int;
48505- hw_if->init = xgbe_init;
48506- hw_if->exit = xgbe_exit;
48507+const struct xgbe_hw_if default_xgbe_hw_if = {
48508+ .tx_complete = xgbe_tx_complete,
48509+
48510+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48511+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48512+ .add_mac_addresses = xgbe_add_mac_addresses,
48513+ .set_mac_address = xgbe_set_mac_address,
48514+
48515+ .enable_rx_csum = xgbe_enable_rx_csum,
48516+ .disable_rx_csum = xgbe_disable_rx_csum,
48517+
48518+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48519+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48520+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48521+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48522+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48523+
48524+ .read_mmd_regs = xgbe_read_mmd_regs,
48525+ .write_mmd_regs = xgbe_write_mmd_regs,
48526+
48527+ .set_gmii_speed = xgbe_set_gmii_speed,
48528+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48529+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48530+
48531+ .enable_tx = xgbe_enable_tx,
48532+ .disable_tx = xgbe_disable_tx,
48533+ .enable_rx = xgbe_enable_rx,
48534+ .disable_rx = xgbe_disable_rx,
48535+
48536+ .powerup_tx = xgbe_powerup_tx,
48537+ .powerdown_tx = xgbe_powerdown_tx,
48538+ .powerup_rx = xgbe_powerup_rx,
48539+ .powerdown_rx = xgbe_powerdown_rx,
48540+
48541+ .pre_xmit = xgbe_pre_xmit,
48542+ .dev_read = xgbe_dev_read,
48543+ .enable_int = xgbe_enable_int,
48544+ .disable_int = xgbe_disable_int,
48545+ .init = xgbe_init,
48546+ .exit = xgbe_exit,
48547
48548 /* Descriptor related Sequences have to be initialized here */
48549- hw_if->tx_desc_init = xgbe_tx_desc_init;
48550- hw_if->rx_desc_init = xgbe_rx_desc_init;
48551- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48552- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48553- hw_if->is_last_desc = xgbe_is_last_desc;
48554- hw_if->is_context_desc = xgbe_is_context_desc;
48555+ .tx_desc_init = xgbe_tx_desc_init,
48556+ .rx_desc_init = xgbe_rx_desc_init,
48557+ .tx_desc_reset = xgbe_tx_desc_reset,
48558+ .rx_desc_reset = xgbe_rx_desc_reset,
48559+ .is_last_desc = xgbe_is_last_desc,
48560+ .is_context_desc = xgbe_is_context_desc,
48561
48562 /* For FLOW ctrl */
48563- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48564- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48565+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48566+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48567
48568 /* For RX coalescing */
48569- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48570- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48571- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48572- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48573+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48574+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48575+ .usec_to_riwt = xgbe_usec_to_riwt,
48576+ .riwt_to_usec = xgbe_riwt_to_usec,
48577
48578 /* For RX and TX threshold config */
48579- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48580- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48581+ .config_rx_threshold = xgbe_config_rx_threshold,
48582+ .config_tx_threshold = xgbe_config_tx_threshold,
48583
48584 /* For RX and TX Store and Forward Mode config */
48585- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48586- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48587+ .config_rsf_mode = xgbe_config_rsf_mode,
48588+ .config_tsf_mode = xgbe_config_tsf_mode,
48589
48590 /* For TX DMA Operating on Second Frame config */
48591- hw_if->config_osp_mode = xgbe_config_osp_mode;
48592+ .config_osp_mode = xgbe_config_osp_mode,
48593
48594 /* For RX and TX PBL config */
48595- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48596- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48597- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48598- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48599- hw_if->config_pblx8 = xgbe_config_pblx8;
48600+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48601+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48602+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48603+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48604+ .config_pblx8 = xgbe_config_pblx8,
48605
48606 /* For MMC statistics support */
48607- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48608- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48609- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48610+ .tx_mmc_int = xgbe_tx_mmc_int,
48611+ .rx_mmc_int = xgbe_rx_mmc_int,
48612+ .read_mmc_stats = xgbe_read_mmc_stats,
48613
48614 /* For PTP config */
48615- hw_if->config_tstamp = xgbe_config_tstamp;
48616- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48617- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48618- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48619- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48620+ .config_tstamp = xgbe_config_tstamp,
48621+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48622+ .set_tstamp_time = xgbe_set_tstamp_time,
48623+ .get_tstamp_time = xgbe_get_tstamp_time,
48624+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48625
48626 /* For Data Center Bridging config */
48627- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48628- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48629-
48630- DBGPR("<--xgbe_init_function_ptrs\n");
48631-}
48632+ .config_dcb_tc = xgbe_config_dcb_tc,
48633+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48634+};
48635diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48636index b26d758..b0d1c3b 100644
48637--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48638+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48639@@ -155,7 +155,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48640
48641 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48642 {
48643- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48644+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48645 struct xgbe_channel *channel;
48646 enum xgbe_int int_id;
48647 unsigned int i;
48648@@ -177,7 +177,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48649
48650 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48651 {
48652- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48653+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48654 struct xgbe_channel *channel;
48655 enum xgbe_int int_id;
48656 unsigned int i;
48657@@ -200,7 +200,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48658 static irqreturn_t xgbe_isr(int irq, void *data)
48659 {
48660 struct xgbe_prv_data *pdata = data;
48661- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48662+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48663 struct xgbe_channel *channel;
48664 unsigned int dma_isr, dma_ch_isr;
48665 unsigned int mac_isr, mac_tssr;
48666@@ -447,7 +447,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
48667
48668 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48669 {
48670- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48671+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48672
48673 DBGPR("-->xgbe_init_tx_coalesce\n");
48674
48675@@ -461,7 +461,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48676
48677 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48678 {
48679- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48680+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48681
48682 DBGPR("-->xgbe_init_rx_coalesce\n");
48683
48684@@ -475,7 +475,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48685
48686 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48687 {
48688- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48689+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48690 struct xgbe_channel *channel;
48691 struct xgbe_ring *ring;
48692 struct xgbe_ring_data *rdata;
48693@@ -500,7 +500,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
48694
48695 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48696 {
48697- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48698+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48699 struct xgbe_channel *channel;
48700 struct xgbe_ring *ring;
48701 struct xgbe_ring_data *rdata;
48702@@ -526,7 +526,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
48703 static void xgbe_adjust_link(struct net_device *netdev)
48704 {
48705 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48706- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48707+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48708 struct phy_device *phydev = pdata->phydev;
48709 int new_state = 0;
48710
48711@@ -634,7 +634,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48712 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48713 {
48714 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48715- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48716+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48717 unsigned long flags;
48718
48719 DBGPR("-->xgbe_powerdown\n");
48720@@ -672,7 +672,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48721 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48722 {
48723 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48724- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48725+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48726 unsigned long flags;
48727
48728 DBGPR("-->xgbe_powerup\n");
48729@@ -709,7 +709,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48730
48731 static int xgbe_start(struct xgbe_prv_data *pdata)
48732 {
48733- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48734+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48735 struct net_device *netdev = pdata->netdev;
48736
48737 DBGPR("-->xgbe_start\n");
48738@@ -735,7 +735,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
48739
48740 static void xgbe_stop(struct xgbe_prv_data *pdata)
48741 {
48742- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48743+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48744 struct net_device *netdev = pdata->netdev;
48745
48746 DBGPR("-->xgbe_stop\n");
48747@@ -755,7 +755,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
48748
48749 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
48750 {
48751- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48752+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48753
48754 DBGPR("-->xgbe_restart_dev\n");
48755
48756@@ -952,7 +952,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48757 return -ERANGE;
48758 }
48759
48760- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48761+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48762
48763 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48764
48765@@ -1090,8 +1090,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48766 static int xgbe_open(struct net_device *netdev)
48767 {
48768 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48769- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48770- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48771+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48772+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48773 int ret;
48774
48775 DBGPR("-->xgbe_open\n");
48776@@ -1171,8 +1171,8 @@ err_phy_init:
48777 static int xgbe_close(struct net_device *netdev)
48778 {
48779 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48780- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48781- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48782+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48783+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48784
48785 DBGPR("-->xgbe_close\n");
48786
48787@@ -1206,8 +1206,8 @@ static int xgbe_close(struct net_device *netdev)
48788 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48789 {
48790 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48791- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48792- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48793+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48794+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48795 struct xgbe_channel *channel;
48796 struct xgbe_ring *ring;
48797 struct xgbe_packet_data *packet;
48798@@ -1276,7 +1276,7 @@ tx_netdev_return:
48799 static void xgbe_set_rx_mode(struct net_device *netdev)
48800 {
48801 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48802- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48803+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48804 unsigned int pr_mode, am_mode;
48805
48806 DBGPR("-->xgbe_set_rx_mode\n");
48807@@ -1295,7 +1295,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48808 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48809 {
48810 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48811- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48812+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48813 struct sockaddr *saddr = addr;
48814
48815 DBGPR("-->xgbe_set_mac_address\n");
48816@@ -1362,7 +1362,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48817
48818 DBGPR("-->%s\n", __func__);
48819
48820- pdata->hw_if.read_mmc_stats(pdata);
48821+ pdata->hw_if->read_mmc_stats(pdata);
48822
48823 s->rx_packets = pstats->rxframecount_gb;
48824 s->rx_bytes = pstats->rxoctetcount_gb;
48825@@ -1389,7 +1389,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48826 u16 vid)
48827 {
48828 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48829- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48830+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48831
48832 DBGPR("-->%s\n", __func__);
48833
48834@@ -1405,7 +1405,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48835 u16 vid)
48836 {
48837 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48838- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48839+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48840
48841 DBGPR("-->%s\n", __func__);
48842
48843@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
48844 netdev_features_t features)
48845 {
48846 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48847- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48848+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48849 unsigned int rxcsum, rxvlan, rxvlan_filter;
48850
48851 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
48852@@ -1521,7 +1521,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48853 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48854 {
48855 struct xgbe_prv_data *pdata = channel->pdata;
48856- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48857+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48858 struct xgbe_ring *ring = channel->rx_ring;
48859 struct xgbe_ring_data *rdata;
48860
48861@@ -1537,8 +1537,8 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
48862 static int xgbe_tx_poll(struct xgbe_channel *channel)
48863 {
48864 struct xgbe_prv_data *pdata = channel->pdata;
48865- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48866- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48867+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48868+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48869 struct xgbe_ring *ring = channel->tx_ring;
48870 struct xgbe_ring_data *rdata;
48871 struct xgbe_ring_desc *rdesc;
48872@@ -1590,7 +1590,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48873 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48874 {
48875 struct xgbe_prv_data *pdata = channel->pdata;
48876- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48877+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48878 struct xgbe_ring *ring = channel->rx_ring;
48879 struct xgbe_ring_data *rdata;
48880 struct xgbe_packet_data *packet;
48881diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48882index 46f6130..f37dde3 100644
48883--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48884+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48885@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48886
48887 DBGPR("-->%s\n", __func__);
48888
48889- pdata->hw_if.read_mmc_stats(pdata);
48890+ pdata->hw_if->read_mmc_stats(pdata);
48891 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48892 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48893 *data++ = *(u64 *)stat;
48894@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48895 struct ethtool_coalesce *ec)
48896 {
48897 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48898- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48899+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48900 unsigned int riwt;
48901
48902 DBGPR("-->xgbe_get_coalesce\n");
48903@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48904 struct ethtool_coalesce *ec)
48905 {
48906 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48907- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48908+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48909 unsigned int rx_frames, rx_riwt, rx_usecs;
48910 unsigned int tx_frames, tx_usecs;
48911
48912diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48913index bdf9cfa..340aea1 100644
48914--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48915+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48916@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48917 DBGPR("<--xgbe_default_config\n");
48918 }
48919
48920-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48921-{
48922- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48923- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48924-}
48925-
48926 static int xgbe_probe(struct platform_device *pdev)
48927 {
48928 struct xgbe_prv_data *pdata;
48929@@ -328,9 +322,8 @@ static int xgbe_probe(struct platform_device *pdev)
48930 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
48931
48932 /* Set all the function pointers */
48933- xgbe_init_all_fptrs(pdata);
48934- hw_if = &pdata->hw_if;
48935- desc_if = &pdata->desc_if;
48936+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48937+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48938
48939 /* Issue software reset to device */
48940 hw_if->exit(pdata);
48941diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48942index 6d2221e..47d1325 100644
48943--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48944+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48945@@ -127,7 +127,7 @@
48946 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48947 {
48948 struct xgbe_prv_data *pdata = mii->priv;
48949- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48950+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48951 int mmd_data;
48952
48953 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48954@@ -144,7 +144,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48955 u16 mmd_val)
48956 {
48957 struct xgbe_prv_data *pdata = mii->priv;
48958- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48959+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48960 int mmd_data = mmd_val;
48961
48962 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48963diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48964index 37e64cf..c3b61cf 100644
48965--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48966+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48967@@ -130,7 +130,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48968 tstamp_cc);
48969 u64 nsec;
48970
48971- nsec = pdata->hw_if.get_tstamp_time(pdata);
48972+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48973
48974 return nsec;
48975 }
48976@@ -159,7 +159,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48977
48978 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48979
48980- pdata->hw_if.update_tstamp_addend(pdata, addend);
48981+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48982
48983 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48984
48985diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48986index e9fe6e6..875fbaf 100644
48987--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48988+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48989@@ -585,8 +585,8 @@ struct xgbe_prv_data {
48990
48991 int irq_number;
48992
48993- struct xgbe_hw_if hw_if;
48994- struct xgbe_desc_if desc_if;
48995+ const struct xgbe_hw_if *hw_if;
48996+ const struct xgbe_desc_if *desc_if;
48997
48998 /* AXI DMA settings */
48999 unsigned int axdomain;
49000@@ -699,6 +699,9 @@ struct xgbe_prv_data {
49001 #endif
49002 };
49003
49004+extern const struct xgbe_hw_if default_xgbe_hw_if;
49005+extern const struct xgbe_desc_if default_xgbe_desc_if;
49006+
49007 /* Function prototypes*/
49008
49009 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
49010diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49011index 571427c..e9fe9e7 100644
49012--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49013+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
49014@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
49015 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
49016 {
49017 /* RX_MODE controlling object */
49018- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
49019+ bnx2x_init_rx_mode_obj(bp);
49020
49021 /* multicast configuration controlling object */
49022 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
49023diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49024index b193604..8873bfd 100644
49025--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49026+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
49027@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
49028 return rc;
49029 }
49030
49031-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49032- struct bnx2x_rx_mode_obj *o)
49033+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
49034 {
49035 if (CHIP_IS_E1x(bp)) {
49036- o->wait_comp = bnx2x_empty_rx_mode_wait;
49037- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
49038+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
49039+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
49040 } else {
49041- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
49042- o->config_rx_mode = bnx2x_set_rx_mode_e2;
49043+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
49044+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
49045 }
49046 }
49047
49048diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49049index 718ecd2..2183b2f 100644
49050--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49051+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
49052@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
49053
49054 /********************* RX MODE ****************/
49055
49056-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
49057- struct bnx2x_rx_mode_obj *o);
49058+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
49059
49060 /**
49061 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
49062diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
49063index 31c9f82..e65e986 100644
49064--- a/drivers/net/ethernet/broadcom/tg3.h
49065+++ b/drivers/net/ethernet/broadcom/tg3.h
49066@@ -150,6 +150,7 @@
49067 #define CHIPREV_ID_5750_A0 0x4000
49068 #define CHIPREV_ID_5750_A1 0x4001
49069 #define CHIPREV_ID_5750_A3 0x4003
49070+#define CHIPREV_ID_5750_C1 0x4201
49071 #define CHIPREV_ID_5750_C2 0x4202
49072 #define CHIPREV_ID_5752_A0_HW 0x5000
49073 #define CHIPREV_ID_5752_A0 0x6000
49074diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
49075index 13f9636..228040f 100644
49076--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
49077+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
49078@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
49079 }
49080
49081 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
49082- bna_cb_ioceth_enable,
49083- bna_cb_ioceth_disable,
49084- bna_cb_ioceth_hbfail,
49085- bna_cb_ioceth_reset
49086+ .enable_cbfn = bna_cb_ioceth_enable,
49087+ .disable_cbfn = bna_cb_ioceth_disable,
49088+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
49089+ .reset_cbfn = bna_cb_ioceth_reset
49090 };
49091
49092 static void bna_attr_init(struct bna_ioceth *ioceth)
49093diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
49094index ffc92a4..40edc77 100644
49095--- a/drivers/net/ethernet/brocade/bna/bnad.c
49096+++ b/drivers/net/ethernet/brocade/bna/bnad.c
49097@@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
49098
49099 len = (vec == nvecs) ?
49100 last_fraglen : unmap->vector.len;
49101+ skb->truesize += unmap->vector.len;
49102 totlen += len;
49103
49104 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
49105@@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
49106
49107 skb->len += totlen;
49108 skb->data_len += totlen;
49109- skb->truesize += totlen;
49110 }
49111
49112 static inline void
49113diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49114index 8cffcdf..aadf043 100644
49115--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49116+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
49117@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
49118 */
49119 struct l2t_skb_cb {
49120 arp_failure_handler_func arp_failure_handler;
49121-};
49122+} __no_const;
49123
49124 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49125
49126diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49127index e5be511..16cb55c 100644
49128--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49129+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49130@@ -2355,7 +2355,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49131
49132 int i;
49133 struct adapter *ap = netdev2adap(dev);
49134- static const unsigned int *reg_ranges;
49135+ const unsigned int *reg_ranges;
49136 int arr_size = 0, buf_size = 0;
49137
49138 if (is_t4(ap->params.chip)) {
49139diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49140index cf8b6ff..274271e 100644
49141--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49142+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49143@@ -5387,7 +5387,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49144 for (i=0; i<ETH_ALEN; i++) {
49145 tmp.addr[i] = dev->dev_addr[i];
49146 }
49147- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49148+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49149 break;
49150
49151 case DE4X5_SET_HWADDR: /* Set the hardware address */
49152@@ -5427,7 +5427,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49153 spin_lock_irqsave(&lp->lock, flags);
49154 memcpy(&statbuf, &lp->pktStats, ioc->len);
49155 spin_unlock_irqrestore(&lp->lock, flags);
49156- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49157+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49158 return -EFAULT;
49159 break;
49160 }
49161diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49162index 93ff8ef..01e0537 100644
49163--- a/drivers/net/ethernet/emulex/benet/be_main.c
49164+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49165@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49166
49167 if (wrapped)
49168 newacc += 65536;
49169- ACCESS_ONCE(*acc) = newacc;
49170+ ACCESS_ONCE_RW(*acc) = newacc;
49171 }
49172
49173 static void populate_erx_stats(struct be_adapter *adapter,
49174diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49175index c77fa4a..7fd42fc 100644
49176--- a/drivers/net/ethernet/faraday/ftgmac100.c
49177+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49178@@ -30,6 +30,8 @@
49179 #include <linux/netdevice.h>
49180 #include <linux/phy.h>
49181 #include <linux/platform_device.h>
49182+#include <linux/interrupt.h>
49183+#include <linux/irqreturn.h>
49184 #include <net/ip.h>
49185
49186 #include "ftgmac100.h"
49187diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49188index 4ff1adc..0ea6bf4 100644
49189--- a/drivers/net/ethernet/faraday/ftmac100.c
49190+++ b/drivers/net/ethernet/faraday/ftmac100.c
49191@@ -31,6 +31,8 @@
49192 #include <linux/module.h>
49193 #include <linux/netdevice.h>
49194 #include <linux/platform_device.h>
49195+#include <linux/interrupt.h>
49196+#include <linux/irqreturn.h>
49197
49198 #include "ftmac100.h"
49199
49200diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49201index 537b621..07f87ce 100644
49202--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49203+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49204@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49205 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49206
49207 /* Update the base adjustement value. */
49208- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49209+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49210 smp_mb(); /* Force the above update. */
49211 }
49212
49213diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49214index 5fd4b52..87aa34b 100644
49215--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49216+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49217@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49218 }
49219
49220 /* update the base incval used to calculate frequency adjustment */
49221- ACCESS_ONCE(adapter->base_incval) = incval;
49222+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49223 smp_mb();
49224
49225 /* need lock to prevent incorrect read while modifying cyclecounter */
49226diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49227index c14d4d8..66da603 100644
49228--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49229+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
49230@@ -1259,6 +1259,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
49231 struct ixgbe_hw *hw = &adapter->hw;
49232 u32 regval;
49233
49234+ if (vf >= adapter->num_vfs)
49235+ return -EINVAL;
49236+
49237 adapter->vfinfo[vf].spoofchk_enabled = setting;
49238
49239 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
49240diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49241index 2bbd01f..e8baa64 100644
49242--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49243+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49244@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49245 struct __vxge_hw_fifo *fifo;
49246 struct vxge_hw_fifo_config *config;
49247 u32 txdl_size, txdl_per_memblock;
49248- struct vxge_hw_mempool_cbs fifo_mp_callback;
49249+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49250+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49251+ };
49252+
49253 struct __vxge_hw_virtualpath *vpath;
49254
49255 if ((vp == NULL) || (attr == NULL)) {
49256@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49257 goto exit;
49258 }
49259
49260- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49261-
49262 fifo->mempool =
49263 __vxge_hw_mempool_create(vpath->hldev,
49264 fifo->config->memblock_size,
49265diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49266index 3172cdf..d01ab34 100644
49267--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49268+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49269@@ -2190,7 +2190,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49270 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49271 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49272 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49273- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49274+ pax_open_kernel();
49275+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49276+ pax_close_kernel();
49277 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49278 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49279 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49280diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49281index be7d7a6..a8983f8 100644
49282--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49283+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49284@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49285 case QLCNIC_NON_PRIV_FUNC:
49286 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49287 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49288- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49289+ pax_open_kernel();
49290+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49291+ pax_close_kernel();
49292 break;
49293 case QLCNIC_PRIV_FUNC:
49294 ahw->op_mode = QLCNIC_PRIV_FUNC;
49295 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49296- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49297+ pax_open_kernel();
49298+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49299+ pax_close_kernel();
49300 break;
49301 case QLCNIC_MGMT_FUNC:
49302 ahw->op_mode = QLCNIC_MGMT_FUNC;
49303 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49304- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49305+ pax_open_kernel();
49306+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49307+ pax_close_kernel();
49308 break;
49309 default:
49310 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
49311diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49312index c9f57fb..208bdc1 100644
49313--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49314+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49315@@ -1285,7 +1285,7 @@ flash_temp:
49316 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49317 {
49318 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49319- static const struct qlcnic_dump_operations *fw_dump_ops;
49320+ const struct qlcnic_dump_operations *fw_dump_ops;
49321 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49322 u32 entry_offset, dump, no_entries, buf_offset = 0;
49323 int i, k, ops_cnt, ops_index, dump_size = 0;
49324diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49325index 0921302..927f761 100644
49326--- a/drivers/net/ethernet/realtek/r8169.c
49327+++ b/drivers/net/ethernet/realtek/r8169.c
49328@@ -744,22 +744,22 @@ struct rtl8169_private {
49329 struct mdio_ops {
49330 void (*write)(struct rtl8169_private *, int, int);
49331 int (*read)(struct rtl8169_private *, int);
49332- } mdio_ops;
49333+ } __no_const mdio_ops;
49334
49335 struct pll_power_ops {
49336 void (*down)(struct rtl8169_private *);
49337 void (*up)(struct rtl8169_private *);
49338- } pll_power_ops;
49339+ } __no_const pll_power_ops;
49340
49341 struct jumbo_ops {
49342 void (*enable)(struct rtl8169_private *);
49343 void (*disable)(struct rtl8169_private *);
49344- } jumbo_ops;
49345+ } __no_const jumbo_ops;
49346
49347 struct csi_ops {
49348 void (*write)(struct rtl8169_private *, int, int);
49349 u32 (*read)(struct rtl8169_private *, int);
49350- } csi_ops;
49351+ } __no_const csi_ops;
49352
49353 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49354 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49355diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49356index 6b861e3..204ac86 100644
49357--- a/drivers/net/ethernet/sfc/ptp.c
49358+++ b/drivers/net/ethernet/sfc/ptp.c
49359@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49360 ptp->start.dma_addr);
49361
49362 /* Clear flag that signals MC ready */
49363- ACCESS_ONCE(*start) = 0;
49364+ ACCESS_ONCE_RW(*start) = 0;
49365 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49366 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49367 EFX_BUG_ON_PARANOID(rc);
49368diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49369index 08c483b..2c4a553 100644
49370--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49371+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49372@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49373
49374 writel(value, ioaddr + MMC_CNTRL);
49375
49376- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49377- MMC_CNTRL, value);
49378+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49379+// MMC_CNTRL, value);
49380 }
49381
49382 /* To mask all all interrupts.*/
49383diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49384index d5e07de..e3bf20a 100644
49385--- a/drivers/net/hyperv/hyperv_net.h
49386+++ b/drivers/net/hyperv/hyperv_net.h
49387@@ -171,7 +171,7 @@ struct rndis_device {
49388 enum rndis_device_state state;
49389 bool link_state;
49390 bool link_change;
49391- atomic_t new_req_id;
49392+ atomic_unchecked_t new_req_id;
49393
49394 spinlock_t request_lock;
49395 struct list_head req_list;
49396diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
49397index 0fcb5e7..148fda3 100644
49398--- a/drivers/net/hyperv/netvsc_drv.c
49399+++ b/drivers/net/hyperv/netvsc_drv.c
49400@@ -556,6 +556,7 @@ do_lso:
49401 do_send:
49402 /* Start filling in the page buffers with the rndis hdr */
49403 rndis_msg->msg_len += rndis_msg_size;
49404+ packet->total_data_buflen = rndis_msg->msg_len;
49405 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
49406 skb, &packet->page_buf[0]);
49407
49408diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49409index 2b86f0b..ecc996f 100644
49410--- a/drivers/net/hyperv/rndis_filter.c
49411+++ b/drivers/net/hyperv/rndis_filter.c
49412@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49413 * template
49414 */
49415 set = &rndis_msg->msg.set_req;
49416- set->req_id = atomic_inc_return(&dev->new_req_id);
49417+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49418
49419 /* Add to the request list */
49420 spin_lock_irqsave(&dev->request_lock, flags);
49421@@ -911,7 +911,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49422
49423 /* Setup the rndis set */
49424 halt = &request->request_msg.msg.halt_req;
49425- halt->req_id = atomic_inc_return(&dev->new_req_id);
49426+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49427
49428 /* Ignore return since this msg is optional. */
49429 rndis_filter_send_request(dev, request);
49430diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
49431index 9ce854f..e43fa17 100644
49432--- a/drivers/net/ieee802154/fakehard.c
49433+++ b/drivers/net/ieee802154/fakehard.c
49434@@ -365,7 +365,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
49435 phy->transmit_power = 0xbf;
49436
49437 dev->netdev_ops = &fake_ops;
49438- dev->ml_priv = &fake_mlme;
49439+ dev->ml_priv = (void *)&fake_mlme;
49440
49441 priv = netdev_priv(dev);
49442 priv->phy = phy;
49443diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49444index 726edab..8939092 100644
49445--- a/drivers/net/macvlan.c
49446+++ b/drivers/net/macvlan.c
49447@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49448 free_nskb:
49449 kfree_skb(nskb);
49450 err:
49451- atomic_long_inc(&skb->dev->rx_dropped);
49452+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49453 }
49454
49455 /* called under rcu_read_lock() from netif_receive_skb */
49456@@ -1144,13 +1144,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49457 int macvlan_link_register(struct rtnl_link_ops *ops)
49458 {
49459 /* common fields */
49460- ops->priv_size = sizeof(struct macvlan_dev);
49461- ops->validate = macvlan_validate;
49462- ops->maxtype = IFLA_MACVLAN_MAX;
49463- ops->policy = macvlan_policy;
49464- ops->changelink = macvlan_changelink;
49465- ops->get_size = macvlan_get_size;
49466- ops->fill_info = macvlan_fill_info;
49467+ pax_open_kernel();
49468+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49469+ *(void **)&ops->validate = macvlan_validate;
49470+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49471+ *(const void **)&ops->policy = macvlan_policy;
49472+ *(void **)&ops->changelink = macvlan_changelink;
49473+ *(void **)&ops->get_size = macvlan_get_size;
49474+ *(void **)&ops->fill_info = macvlan_fill_info;
49475+ pax_close_kernel();
49476
49477 return rtnl_link_register(ops);
49478 };
49479@@ -1230,7 +1232,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49480 return NOTIFY_DONE;
49481 }
49482
49483-static struct notifier_block macvlan_notifier_block __read_mostly = {
49484+static struct notifier_block macvlan_notifier_block = {
49485 .notifier_call = macvlan_device_event,
49486 };
49487
49488diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49489index 0c6adaa..0784e3f 100644
49490--- a/drivers/net/macvtap.c
49491+++ b/drivers/net/macvtap.c
49492@@ -1018,7 +1018,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49493 }
49494
49495 ret = 0;
49496- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49497+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49498 put_user(q->flags, &ifr->ifr_flags))
49499 ret = -EFAULT;
49500 macvtap_put_vlan(vlan);
49501@@ -1188,7 +1188,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49502 return NOTIFY_DONE;
49503 }
49504
49505-static struct notifier_block macvtap_notifier_block __read_mostly = {
49506+static struct notifier_block macvtap_notifier_block = {
49507 .notifier_call = macvtap_device_event,
49508 };
49509
49510diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49511index fa0d717..bab8c01 100644
49512--- a/drivers/net/ppp/ppp_generic.c
49513+++ b/drivers/net/ppp/ppp_generic.c
49514@@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
49515 if (file == ppp->owner)
49516 ppp_shutdown_interface(ppp);
49517 }
49518- if (atomic_long_read(&file->f_count) <= 2) {
49519+ if (atomic_long_read(&file->f_count) < 2) {
49520 ppp_release(NULL, file);
49521 err = 0;
49522 } else
49523@@ -1020,7 +1020,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49524 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49525 struct ppp_stats stats;
49526 struct ppp_comp_stats cstats;
49527- char *vers;
49528
49529 switch (cmd) {
49530 case SIOCGPPPSTATS:
49531@@ -1042,8 +1041,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49532 break;
49533
49534 case SIOCGPPPVER:
49535- vers = PPP_VERSION;
49536- if (copy_to_user(addr, vers, strlen(vers) + 1))
49537+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49538 break;
49539 err = 0;
49540 break;
49541diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49542index 079f7ad..b2a2bfa7 100644
49543--- a/drivers/net/slip/slhc.c
49544+++ b/drivers/net/slip/slhc.c
49545@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49546 register struct tcphdr *thp;
49547 register struct iphdr *ip;
49548 register struct cstate *cs;
49549- int len, hdrlen;
49550+ long len, hdrlen;
49551 unsigned char *cp = icp;
49552
49553 /* We've got a compressed packet; read the change byte */
49554diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49555index 1f76c2ea..9681171 100644
49556--- a/drivers/net/team/team.c
49557+++ b/drivers/net/team/team.c
49558@@ -2862,7 +2862,7 @@ static int team_device_event(struct notifier_block *unused,
49559 return NOTIFY_DONE;
49560 }
49561
49562-static struct notifier_block team_notifier_block __read_mostly = {
49563+static struct notifier_block team_notifier_block = {
49564 .notifier_call = team_device_event,
49565 };
49566
49567diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49568index acaaf67..a33483d 100644
49569--- a/drivers/net/tun.c
49570+++ b/drivers/net/tun.c
49571@@ -1855,7 +1855,7 @@ unlock:
49572 }
49573
49574 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49575- unsigned long arg, int ifreq_len)
49576+ unsigned long arg, size_t ifreq_len)
49577 {
49578 struct tun_file *tfile = file->private_data;
49579 struct tun_struct *tun;
49580@@ -1868,6 +1868,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49581 unsigned int ifindex;
49582 int ret;
49583
49584+ if (ifreq_len > sizeof ifr)
49585+ return -EFAULT;
49586+
49587 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49588 if (copy_from_user(&ifr, argp, ifreq_len))
49589 return -EFAULT;
49590diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49591index babda7d..e40c90a 100644
49592--- a/drivers/net/usb/hso.c
49593+++ b/drivers/net/usb/hso.c
49594@@ -71,7 +71,7 @@
49595 #include <asm/byteorder.h>
49596 #include <linux/serial_core.h>
49597 #include <linux/serial.h>
49598-
49599+#include <asm/local.h>
49600
49601 #define MOD_AUTHOR "Option Wireless"
49602 #define MOD_DESCRIPTION "USB High Speed Option driver"
49603@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49604 struct urb *urb;
49605
49606 urb = serial->rx_urb[0];
49607- if (serial->port.count > 0) {
49608+ if (atomic_read(&serial->port.count) > 0) {
49609 count = put_rxbuf_data(urb, serial);
49610 if (count == -1)
49611 return;
49612@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49613 DUMP1(urb->transfer_buffer, urb->actual_length);
49614
49615 /* Anyone listening? */
49616- if (serial->port.count == 0)
49617+ if (atomic_read(&serial->port.count) == 0)
49618 return;
49619
49620 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49621@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49622 tty_port_tty_set(&serial->port, tty);
49623
49624 /* check for port already opened, if not set the termios */
49625- serial->port.count++;
49626- if (serial->port.count == 1) {
49627+ if (atomic_inc_return(&serial->port.count) == 1) {
49628 serial->rx_state = RX_IDLE;
49629 /* Force default termio settings */
49630 _hso_serial_set_termios(tty, NULL);
49631@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49632 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49633 if (result) {
49634 hso_stop_serial_device(serial->parent);
49635- serial->port.count--;
49636+ atomic_dec(&serial->port.count);
49637 kref_put(&serial->parent->ref, hso_serial_ref_free);
49638 }
49639 } else {
49640@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49641
49642 /* reset the rts and dtr */
49643 /* do the actual close */
49644- serial->port.count--;
49645+ atomic_dec(&serial->port.count);
49646
49647- if (serial->port.count <= 0) {
49648- serial->port.count = 0;
49649+ if (atomic_read(&serial->port.count) <= 0) {
49650+ atomic_set(&serial->port.count, 0);
49651 tty_port_tty_set(&serial->port, NULL);
49652 if (!usb_gone)
49653 hso_stop_serial_device(serial->parent);
49654@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49655
49656 /* the actual setup */
49657 spin_lock_irqsave(&serial->serial_lock, flags);
49658- if (serial->port.count)
49659+ if (atomic_read(&serial->port.count))
49660 _hso_serial_set_termios(tty, old);
49661 else
49662 tty->termios = *old;
49663@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
49664 D1("Pending read interrupt on port %d\n", i);
49665 spin_lock(&serial->serial_lock);
49666 if (serial->rx_state == RX_IDLE &&
49667- serial->port.count > 0) {
49668+ atomic_read(&serial->port.count) > 0) {
49669 /* Setup and send a ctrl req read on
49670 * port i */
49671 if (!serial->rx_urb_filled[0]) {
49672@@ -3047,7 +3046,7 @@ static int hso_resume(struct usb_interface *iface)
49673 /* Start all serial ports */
49674 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49675 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49676- if (dev2ser(serial_table[i])->port.count) {
49677+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49678 result =
49679 hso_start_serial_device(serial_table[i], GFP_NOIO);
49680 hso_kick_transmit(dev2ser(serial_table[i]));
49681diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49682index 604ef21..d1f49a1 100644
49683--- a/drivers/net/usb/r8152.c
49684+++ b/drivers/net/usb/r8152.c
49685@@ -575,7 +575,7 @@ struct r8152 {
49686 void (*up)(struct r8152 *);
49687 void (*down)(struct r8152 *);
49688 void (*unload)(struct r8152 *);
49689- } rtl_ops;
49690+ } __no_const rtl_ops;
49691
49692 int intr_interval;
49693 u32 saved_wolopts;
49694diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49695index a2515887..6d13233 100644
49696--- a/drivers/net/usb/sierra_net.c
49697+++ b/drivers/net/usb/sierra_net.c
49698@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49699 /* atomic counter partially included in MAC address to make sure 2 devices
49700 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49701 */
49702-static atomic_t iface_counter = ATOMIC_INIT(0);
49703+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49704
49705 /*
49706 * SYNC Timer Delay definition used to set the expiry time
49707@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49708 dev->net->netdev_ops = &sierra_net_device_ops;
49709
49710 /* change MAC addr to include, ifacenum, and to be unique */
49711- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49712+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49713 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49714
49715 /* we will have to manufacture ethernet headers, prepare template */
49716diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49717index 59caa06..de191b3 100644
49718--- a/drivers/net/virtio_net.c
49719+++ b/drivers/net/virtio_net.c
49720@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49721 #define RECEIVE_AVG_WEIGHT 64
49722
49723 /* Minimum alignment for mergeable packet buffers. */
49724-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49725+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49726
49727 #define VIRTNET_DRIVER_VERSION "1.0.0"
49728
49729diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49730index beb377b..b5bbf08 100644
49731--- a/drivers/net/vxlan.c
49732+++ b/drivers/net/vxlan.c
49733@@ -1440,9 +1440,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
49734 if (!in6_dev)
49735 goto out;
49736
49737- if (!pskb_may_pull(skb, skb->len))
49738- goto out;
49739-
49740 iphdr = ipv6_hdr(skb);
49741 saddr = &iphdr->saddr;
49742 daddr = &iphdr->daddr;
49743@@ -1717,6 +1714,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
49744 struct pcpu_sw_netstats *tx_stats, *rx_stats;
49745 union vxlan_addr loopback;
49746 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
49747+ struct net_device *dev = skb->dev;
49748+ int len = skb->len;
49749
49750 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
49751 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
49752@@ -1740,16 +1739,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
49753
49754 u64_stats_update_begin(&tx_stats->syncp);
49755 tx_stats->tx_packets++;
49756- tx_stats->tx_bytes += skb->len;
49757+ tx_stats->tx_bytes += len;
49758 u64_stats_update_end(&tx_stats->syncp);
49759
49760 if (netif_rx(skb) == NET_RX_SUCCESS) {
49761 u64_stats_update_begin(&rx_stats->syncp);
49762 rx_stats->rx_packets++;
49763- rx_stats->rx_bytes += skb->len;
49764+ rx_stats->rx_bytes += len;
49765 u64_stats_update_end(&rx_stats->syncp);
49766 } else {
49767- skb->dev->stats.rx_dropped++;
49768+ dev->stats.rx_dropped++;
49769 }
49770 }
49771
49772@@ -1927,7 +1926,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
49773 return arp_reduce(dev, skb);
49774 #if IS_ENABLED(CONFIG_IPV6)
49775 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
49776- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
49777+ pskb_may_pull(skb, sizeof(struct ipv6hdr)
49778+ + sizeof(struct nd_msg)) &&
49779 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
49780 struct nd_msg *msg;
49781
49782@@ -2750,7 +2750,7 @@ nla_put_failure:
49783 return -EMSGSIZE;
49784 }
49785
49786-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49787+static struct rtnl_link_ops vxlan_link_ops = {
49788 .kind = "vxlan",
49789 .maxtype = IFLA_VXLAN_MAX,
49790 .policy = vxlan_policy,
49791@@ -2797,7 +2797,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49792 return NOTIFY_DONE;
49793 }
49794
49795-static struct notifier_block vxlan_notifier_block __read_mostly = {
49796+static struct notifier_block vxlan_notifier_block = {
49797 .notifier_call = vxlan_lowerdev_event,
49798 };
49799
49800diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49801index 5920c99..ff2e4a5 100644
49802--- a/drivers/net/wan/lmc/lmc_media.c
49803+++ b/drivers/net/wan/lmc/lmc_media.c
49804@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49805 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49806
49807 lmc_media_t lmc_ds3_media = {
49808- lmc_ds3_init, /* special media init stuff */
49809- lmc_ds3_default, /* reset to default state */
49810- lmc_ds3_set_status, /* reset status to state provided */
49811- lmc_dummy_set_1, /* set clock source */
49812- lmc_dummy_set2_1, /* set line speed */
49813- lmc_ds3_set_100ft, /* set cable length */
49814- lmc_ds3_set_scram, /* set scrambler */
49815- lmc_ds3_get_link_status, /* get link status */
49816- lmc_dummy_set_1, /* set link status */
49817- lmc_ds3_set_crc_length, /* set CRC length */
49818- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49819- lmc_ds3_watchdog
49820+ .init = lmc_ds3_init, /* special media init stuff */
49821+ .defaults = lmc_ds3_default, /* reset to default state */
49822+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49823+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49824+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49825+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49826+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49827+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49828+ .set_link_status = lmc_dummy_set_1, /* set link status */
49829+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49830+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49831+ .watchdog = lmc_ds3_watchdog
49832 };
49833
49834 lmc_media_t lmc_hssi_media = {
49835- lmc_hssi_init, /* special media init stuff */
49836- lmc_hssi_default, /* reset to default state */
49837- lmc_hssi_set_status, /* reset status to state provided */
49838- lmc_hssi_set_clock, /* set clock source */
49839- lmc_dummy_set2_1, /* set line speed */
49840- lmc_dummy_set_1, /* set cable length */
49841- lmc_dummy_set_1, /* set scrambler */
49842- lmc_hssi_get_link_status, /* get link status */
49843- lmc_hssi_set_link_status, /* set link status */
49844- lmc_hssi_set_crc_length, /* set CRC length */
49845- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49846- lmc_hssi_watchdog
49847+ .init = lmc_hssi_init, /* special media init stuff */
49848+ .defaults = lmc_hssi_default, /* reset to default state */
49849+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49850+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49851+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49852+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49853+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49854+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49855+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49856+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49857+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49858+ .watchdog = lmc_hssi_watchdog
49859 };
49860
49861-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49862- lmc_ssi_default, /* reset to default state */
49863- lmc_ssi_set_status, /* reset status to state provided */
49864- lmc_ssi_set_clock, /* set clock source */
49865- lmc_ssi_set_speed, /* set line speed */
49866- lmc_dummy_set_1, /* set cable length */
49867- lmc_dummy_set_1, /* set scrambler */
49868- lmc_ssi_get_link_status, /* get link status */
49869- lmc_ssi_set_link_status, /* set link status */
49870- lmc_ssi_set_crc_length, /* set CRC length */
49871- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49872- lmc_ssi_watchdog
49873+lmc_media_t lmc_ssi_media = {
49874+ .init = lmc_ssi_init, /* special media init stuff */
49875+ .defaults = lmc_ssi_default, /* reset to default state */
49876+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49877+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49878+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49879+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49880+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49881+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49882+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49883+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49884+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49885+ .watchdog = lmc_ssi_watchdog
49886 };
49887
49888 lmc_media_t lmc_t1_media = {
49889- lmc_t1_init, /* special media init stuff */
49890- lmc_t1_default, /* reset to default state */
49891- lmc_t1_set_status, /* reset status to state provided */
49892- lmc_t1_set_clock, /* set clock source */
49893- lmc_dummy_set2_1, /* set line speed */
49894- lmc_dummy_set_1, /* set cable length */
49895- lmc_dummy_set_1, /* set scrambler */
49896- lmc_t1_get_link_status, /* get link status */
49897- lmc_dummy_set_1, /* set link status */
49898- lmc_t1_set_crc_length, /* set CRC length */
49899- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49900- lmc_t1_watchdog
49901+ .init = lmc_t1_init, /* special media init stuff */
49902+ .defaults = lmc_t1_default, /* reset to default state */
49903+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49904+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49905+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49906+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49907+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49908+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49909+ .set_link_status = lmc_dummy_set_1, /* set link status */
49910+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49911+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49912+ .watchdog = lmc_t1_watchdog
49913 };
49914
49915 static void
49916diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49917index feacc3b..5bac0de 100644
49918--- a/drivers/net/wan/z85230.c
49919+++ b/drivers/net/wan/z85230.c
49920@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49921
49922 struct z8530_irqhandler z8530_sync =
49923 {
49924- z8530_rx,
49925- z8530_tx,
49926- z8530_status
49927+ .rx = z8530_rx,
49928+ .tx = z8530_tx,
49929+ .status = z8530_status
49930 };
49931
49932 EXPORT_SYMBOL(z8530_sync);
49933@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49934 }
49935
49936 static struct z8530_irqhandler z8530_dma_sync = {
49937- z8530_dma_rx,
49938- z8530_dma_tx,
49939- z8530_dma_status
49940+ .rx = z8530_dma_rx,
49941+ .tx = z8530_dma_tx,
49942+ .status = z8530_dma_status
49943 };
49944
49945 static struct z8530_irqhandler z8530_txdma_sync = {
49946- z8530_rx,
49947- z8530_dma_tx,
49948- z8530_dma_status
49949+ .rx = z8530_rx,
49950+ .tx = z8530_dma_tx,
49951+ .status = z8530_dma_status
49952 };
49953
49954 /**
49955@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49956
49957 struct z8530_irqhandler z8530_nop=
49958 {
49959- z8530_rx_clear,
49960- z8530_tx_clear,
49961- z8530_status_clear
49962+ .rx = z8530_rx_clear,
49963+ .tx = z8530_tx_clear,
49964+ .status = z8530_status_clear
49965 };
49966
49967
49968diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49969index 0b60295..b8bfa5b 100644
49970--- a/drivers/net/wimax/i2400m/rx.c
49971+++ b/drivers/net/wimax/i2400m/rx.c
49972@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49973 if (i2400m->rx_roq == NULL)
49974 goto error_roq_alloc;
49975
49976- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49977+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49978 GFP_KERNEL);
49979 if (rd == NULL) {
49980 result = -ENOMEM;
49981diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49982index e71a2ce..2268d61 100644
49983--- a/drivers/net/wireless/airo.c
49984+++ b/drivers/net/wireless/airo.c
49985@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49986 struct airo_info *ai = dev->ml_priv;
49987 int ridcode;
49988 int enabled;
49989- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49990+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49991 unsigned char *iobuf;
49992
49993 /* Only super-user can write RIDs */
49994diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49995index da92bfa..5a9001a 100644
49996--- a/drivers/net/wireless/at76c50x-usb.c
49997+++ b/drivers/net/wireless/at76c50x-usb.c
49998@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49999 }
50000
50001 /* Convert timeout from the DFU status to jiffies */
50002-static inline unsigned long at76_get_timeout(struct dfu_status *s)
50003+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
50004 {
50005 return msecs_to_jiffies((s->poll_timeout[2] << 16)
50006 | (s->poll_timeout[1] << 8)
50007diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
50008index 5fdc40d..3975205 100644
50009--- a/drivers/net/wireless/ath/ath10k/htc.c
50010+++ b/drivers/net/wireless/ath/ath10k/htc.c
50011@@ -856,7 +856,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
50012 /* registered target arrival callback from the HIF layer */
50013 int ath10k_htc_init(struct ath10k *ar)
50014 {
50015- struct ath10k_hif_cb htc_callbacks;
50016+ static struct ath10k_hif_cb htc_callbacks = {
50017+ .rx_completion = ath10k_htc_rx_completion_handler,
50018+ .tx_completion = ath10k_htc_tx_completion_handler,
50019+ };
50020 struct ath10k_htc_ep *ep = NULL;
50021 struct ath10k_htc *htc = &ar->htc;
50022
50023@@ -866,8 +869,6 @@ int ath10k_htc_init(struct ath10k *ar)
50024 ath10k_htc_reset_endpoint_states(htc);
50025
50026 /* setup HIF layer callbacks */
50027- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
50028- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
50029 htc->ar = ar;
50030
50031 /* Get HIF default pipe for HTC message exchange */
50032diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
50033index 4716d33..a688310 100644
50034--- a/drivers/net/wireless/ath/ath10k/htc.h
50035+++ b/drivers/net/wireless/ath/ath10k/htc.h
50036@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
50037
50038 struct ath10k_htc_ops {
50039 void (*target_send_suspend_complete)(struct ath10k *ar);
50040-};
50041+} __no_const;
50042
50043 struct ath10k_htc_ep_ops {
50044 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
50045 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
50046 void (*ep_tx_credits)(struct ath10k *);
50047-};
50048+} __no_const;
50049
50050 /* service connection information */
50051 struct ath10k_htc_svc_conn_req {
50052diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50053index 59af9f9..5f3564f 100644
50054--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50055+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
50056@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50057 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
50058 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
50059
50060- ACCESS_ONCE(ads->ds_link) = i->link;
50061- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
50062+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
50063+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
50064
50065 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
50066 ctl6 = SM(i->keytype, AR_EncrType);
50067@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50068
50069 if ((i->is_first || i->is_last) &&
50070 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50071- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50072+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50073 | set11nTries(i->rates, 1)
50074 | set11nTries(i->rates, 2)
50075 | set11nTries(i->rates, 3)
50076 | (i->dur_update ? AR_DurUpdateEna : 0)
50077 | SM(0, AR_BurstDur);
50078
50079- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50080+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50081 | set11nRate(i->rates, 1)
50082 | set11nRate(i->rates, 2)
50083 | set11nRate(i->rates, 3);
50084 } else {
50085- ACCESS_ONCE(ads->ds_ctl2) = 0;
50086- ACCESS_ONCE(ads->ds_ctl3) = 0;
50087+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50088+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50089 }
50090
50091 if (!i->is_first) {
50092- ACCESS_ONCE(ads->ds_ctl0) = 0;
50093- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50094- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50095+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50096+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50097+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50098 return;
50099 }
50100
50101@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50102 break;
50103 }
50104
50105- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50106+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50107 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50108 | SM(i->txpower, AR_XmitPower0)
50109 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50110@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50111 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50112 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50113
50114- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50115- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50116+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50117+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50118
50119 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50120 return;
50121
50122- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50123+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50124 | set11nPktDurRTSCTS(i->rates, 1);
50125
50126- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50127+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50128 | set11nPktDurRTSCTS(i->rates, 3);
50129
50130- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50131+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50132 | set11nRateFlags(i->rates, 1)
50133 | set11nRateFlags(i->rates, 2)
50134 | set11nRateFlags(i->rates, 3)
50135 | SM(i->rtscts_rate, AR_RTSCTSRate);
50136
50137- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
50138- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
50139- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
50140+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
50141+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
50142+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
50143 }
50144
50145 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
50146diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50147index 71e38e8..5ac96ca 100644
50148--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50149+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50150@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50151 (i->qcu << AR_TxQcuNum_S) | desc_len;
50152
50153 checksum += val;
50154- ACCESS_ONCE(ads->info) = val;
50155+ ACCESS_ONCE_RW(ads->info) = val;
50156
50157 checksum += i->link;
50158- ACCESS_ONCE(ads->link) = i->link;
50159+ ACCESS_ONCE_RW(ads->link) = i->link;
50160
50161 checksum += i->buf_addr[0];
50162- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50163+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50164 checksum += i->buf_addr[1];
50165- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50166+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50167 checksum += i->buf_addr[2];
50168- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50169+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50170 checksum += i->buf_addr[3];
50171- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50172+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50173
50174 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50175- ACCESS_ONCE(ads->ctl3) = val;
50176+ ACCESS_ONCE_RW(ads->ctl3) = val;
50177 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50178- ACCESS_ONCE(ads->ctl5) = val;
50179+ ACCESS_ONCE_RW(ads->ctl5) = val;
50180 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50181- ACCESS_ONCE(ads->ctl7) = val;
50182+ ACCESS_ONCE_RW(ads->ctl7) = val;
50183 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50184- ACCESS_ONCE(ads->ctl9) = val;
50185+ ACCESS_ONCE_RW(ads->ctl9) = val;
50186
50187 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50188- ACCESS_ONCE(ads->ctl10) = checksum;
50189+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50190
50191 if (i->is_first || i->is_last) {
50192- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50193+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50194 | set11nTries(i->rates, 1)
50195 | set11nTries(i->rates, 2)
50196 | set11nTries(i->rates, 3)
50197 | (i->dur_update ? AR_DurUpdateEna : 0)
50198 | SM(0, AR_BurstDur);
50199
50200- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50201+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50202 | set11nRate(i->rates, 1)
50203 | set11nRate(i->rates, 2)
50204 | set11nRate(i->rates, 3);
50205 } else {
50206- ACCESS_ONCE(ads->ctl13) = 0;
50207- ACCESS_ONCE(ads->ctl14) = 0;
50208+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50209+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50210 }
50211
50212 ads->ctl20 = 0;
50213@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50214
50215 ctl17 = SM(i->keytype, AR_EncrType);
50216 if (!i->is_first) {
50217- ACCESS_ONCE(ads->ctl11) = 0;
50218- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50219- ACCESS_ONCE(ads->ctl15) = 0;
50220- ACCESS_ONCE(ads->ctl16) = 0;
50221- ACCESS_ONCE(ads->ctl17) = ctl17;
50222- ACCESS_ONCE(ads->ctl18) = 0;
50223- ACCESS_ONCE(ads->ctl19) = 0;
50224+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50225+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50226+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50227+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50228+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50229+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50230+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50231 return;
50232 }
50233
50234- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50235+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50236 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50237 | SM(i->txpower, AR_XmitPower0)
50238 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50239@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50240 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50241 ctl12 |= SM(val, AR_PAPRDChainMask);
50242
50243- ACCESS_ONCE(ads->ctl12) = ctl12;
50244- ACCESS_ONCE(ads->ctl17) = ctl17;
50245+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50246+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50247
50248- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50249+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50250 | set11nPktDurRTSCTS(i->rates, 1);
50251
50252- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50253+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50254 | set11nPktDurRTSCTS(i->rates, 3);
50255
50256- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50257+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50258 | set11nRateFlags(i->rates, 1)
50259 | set11nRateFlags(i->rates, 2)
50260 | set11nRateFlags(i->rates, 3)
50261 | SM(i->rtscts_rate, AR_RTSCTSRate);
50262
50263- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50264+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50265
50266- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
50267- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
50268- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
50269+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
50270+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
50271+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
50272 }
50273
50274 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50275diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50276index 51b4ebe..d1929dd 100644
50277--- a/drivers/net/wireless/ath/ath9k/hw.h
50278+++ b/drivers/net/wireless/ath/ath9k/hw.h
50279@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
50280
50281 /* ANI */
50282 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50283-};
50284+} __no_const;
50285
50286 /**
50287 * struct ath_spec_scan - parameters for Atheros spectral scan
50288@@ -706,7 +706,7 @@ struct ath_hw_ops {
50289 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50290 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50291 #endif
50292-};
50293+} __no_const;
50294
50295 struct ath_nf_limits {
50296 s16 max;
50297diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50298index 4b148bb..ac738fa 100644
50299--- a/drivers/net/wireless/ath/ath9k/main.c
50300+++ b/drivers/net/wireless/ath/ath9k/main.c
50301@@ -2592,16 +2592,18 @@ void ath9k_fill_chanctx_ops(void)
50302 if (!ath9k_use_chanctx)
50303 return;
50304
50305- ath9k_ops.hw_scan = ath9k_hw_scan;
50306- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50307- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50308- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50309- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50310- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50311- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50312- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50313- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50314- ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
50315+ pax_open_kernel();
50316+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50317+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50318+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50319+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50320+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50321+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50322+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50323+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50324+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50325+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
50326+ pax_close_kernel();
50327 }
50328
50329 struct ieee80211_ops ath9k_ops = {
50330diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50331index 92190da..f3a4c4c 100644
50332--- a/drivers/net/wireless/b43/phy_lp.c
50333+++ b/drivers/net/wireless/b43/phy_lp.c
50334@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50335 {
50336 struct ssb_bus *bus = dev->dev->sdev->bus;
50337
50338- static const struct b206x_channel *chandata = NULL;
50339+ const struct b206x_channel *chandata = NULL;
50340 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50341 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50342 u16 old_comm15, scale;
50343diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50344index dc1d20c..f7a4f06 100644
50345--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50346+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50347@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50348 */
50349 if (il3945_mod_params.disable_hw_scan) {
50350 D_INFO("Disabling hw_scan\n");
50351- il3945_mac_ops.hw_scan = NULL;
50352+ pax_open_kernel();
50353+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50354+ pax_close_kernel();
50355 }
50356
50357 D_INFO("*** LOAD DRIVER ***\n");
50358diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50359index 0ffb6ff..c0b7f0e 100644
50360--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50361+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50362@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50363 {
50364 struct iwl_priv *priv = file->private_data;
50365 char buf[64];
50366- int buf_size;
50367+ size_t buf_size;
50368 u32 offset, len;
50369
50370 memset(buf, 0, sizeof(buf));
50371@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50372 struct iwl_priv *priv = file->private_data;
50373
50374 char buf[8];
50375- int buf_size;
50376+ size_t buf_size;
50377 u32 reset_flag;
50378
50379 memset(buf, 0, sizeof(buf));
50380@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50381 {
50382 struct iwl_priv *priv = file->private_data;
50383 char buf[8];
50384- int buf_size;
50385+ size_t buf_size;
50386 int ht40;
50387
50388 memset(buf, 0, sizeof(buf));
50389@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50390 {
50391 struct iwl_priv *priv = file->private_data;
50392 char buf[8];
50393- int buf_size;
50394+ size_t buf_size;
50395 int value;
50396
50397 memset(buf, 0, sizeof(buf));
50398@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50399 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50400 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50401
50402-static const char *fmt_value = " %-30s %10u\n";
50403-static const char *fmt_hex = " %-30s 0x%02X\n";
50404-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50405-static const char *fmt_header =
50406+static const char fmt_value[] = " %-30s %10u\n";
50407+static const char fmt_hex[] = " %-30s 0x%02X\n";
50408+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50409+static const char fmt_header[] =
50410 "%-32s current cumulative delta max\n";
50411
50412 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50413@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50414 {
50415 struct iwl_priv *priv = file->private_data;
50416 char buf[8];
50417- int buf_size;
50418+ size_t buf_size;
50419 int clear;
50420
50421 memset(buf, 0, sizeof(buf));
50422@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50423 {
50424 struct iwl_priv *priv = file->private_data;
50425 char buf[8];
50426- int buf_size;
50427+ size_t buf_size;
50428 int trace;
50429
50430 memset(buf, 0, sizeof(buf));
50431@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50432 {
50433 struct iwl_priv *priv = file->private_data;
50434 char buf[8];
50435- int buf_size;
50436+ size_t buf_size;
50437 int missed;
50438
50439 memset(buf, 0, sizeof(buf));
50440@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50441
50442 struct iwl_priv *priv = file->private_data;
50443 char buf[8];
50444- int buf_size;
50445+ size_t buf_size;
50446 int plcp;
50447
50448 memset(buf, 0, sizeof(buf));
50449@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50450
50451 struct iwl_priv *priv = file->private_data;
50452 char buf[8];
50453- int buf_size;
50454+ size_t buf_size;
50455 int flush;
50456
50457 memset(buf, 0, sizeof(buf));
50458@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50459
50460 struct iwl_priv *priv = file->private_data;
50461 char buf[8];
50462- int buf_size;
50463+ size_t buf_size;
50464 int rts;
50465
50466 if (!priv->cfg->ht_params)
50467@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50468 {
50469 struct iwl_priv *priv = file->private_data;
50470 char buf[8];
50471- int buf_size;
50472+ size_t buf_size;
50473
50474 memset(buf, 0, sizeof(buf));
50475 buf_size = min(count, sizeof(buf) - 1);
50476@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50477 struct iwl_priv *priv = file->private_data;
50478 u32 event_log_flag;
50479 char buf[8];
50480- int buf_size;
50481+ size_t buf_size;
50482
50483 /* check that the interface is up */
50484 if (!iwl_is_ready(priv))
50485@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50486 struct iwl_priv *priv = file->private_data;
50487 char buf[8];
50488 u32 calib_disabled;
50489- int buf_size;
50490+ size_t buf_size;
50491
50492 memset(buf, 0, sizeof(buf));
50493 buf_size = min(count, sizeof(buf) - 1);
50494diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50495index 06e04aa..d5e1f0d 100644
50496--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50497+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50498@@ -1684,7 +1684,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50499 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50500
50501 char buf[8];
50502- int buf_size;
50503+ size_t buf_size;
50504 u32 reset_flag;
50505
50506 memset(buf, 0, sizeof(buf));
50507@@ -1705,7 +1705,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50508 {
50509 struct iwl_trans *trans = file->private_data;
50510 char buf[8];
50511- int buf_size;
50512+ size_t buf_size;
50513 int csr;
50514
50515 memset(buf, 0, sizeof(buf));
50516diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50517index 1326f61..9e56010f 100644
50518--- a/drivers/net/wireless/mac80211_hwsim.c
50519+++ b/drivers/net/wireless/mac80211_hwsim.c
50520@@ -2575,20 +2575,20 @@ static int __init init_mac80211_hwsim(void)
50521 if (channels < 1)
50522 return -EINVAL;
50523
50524- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50525- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50526- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50527- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50528- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50529- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50530- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50531- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50532- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50533- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50534- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50535- mac80211_hwsim_assign_vif_chanctx;
50536- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50537- mac80211_hwsim_unassign_vif_chanctx;
50538+ pax_open_kernel();
50539+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50540+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50541+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50542+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50543+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50544+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50545+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50546+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50547+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50548+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50549+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50550+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50551+ pax_close_kernel();
50552
50553 spin_lock_init(&hwsim_radio_lock);
50554 INIT_LIST_HEAD(&hwsim_radios);
50555diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50556index d2a9a08..0cb175d 100644
50557--- a/drivers/net/wireless/rndis_wlan.c
50558+++ b/drivers/net/wireless/rndis_wlan.c
50559@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50560
50561 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50562
50563- if (rts_threshold < 0 || rts_threshold > 2347)
50564+ if (rts_threshold > 2347)
50565 rts_threshold = 2347;
50566
50567 tmp = cpu_to_le32(rts_threshold);
50568diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50569index d13f25c..2573994 100644
50570--- a/drivers/net/wireless/rt2x00/rt2x00.h
50571+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50572@@ -375,7 +375,7 @@ struct rt2x00_intf {
50573 * for hardware which doesn't support hardware
50574 * sequence counting.
50575 */
50576- atomic_t seqno;
50577+ atomic_unchecked_t seqno;
50578 };
50579
50580 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50581diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50582index 8e68f87..c35ba29 100644
50583--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50584+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50585@@ -250,9 +250,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50586 * sequence counter given by mac80211.
50587 */
50588 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50589- seqno = atomic_add_return(0x10, &intf->seqno);
50590+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50591 else
50592- seqno = atomic_read(&intf->seqno);
50593+ seqno = atomic_read_unchecked(&intf->seqno);
50594
50595 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50596 hdr->seq_ctrl |= cpu_to_le16(seqno);
50597diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50598index b661f896..ddf7d2b 100644
50599--- a/drivers/net/wireless/ti/wl1251/sdio.c
50600+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50601@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50602
50603 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50604
50605- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50606- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50607+ pax_open_kernel();
50608+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50609+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50610+ pax_close_kernel();
50611
50612 wl1251_info("using dedicated interrupt line");
50613 } else {
50614- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50615- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50616+ pax_open_kernel();
50617+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50618+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50619+ pax_close_kernel();
50620
50621 wl1251_info("using SDIO interrupt");
50622 }
50623diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50624index 0bccf12..3d95068 100644
50625--- a/drivers/net/wireless/ti/wl12xx/main.c
50626+++ b/drivers/net/wireless/ti/wl12xx/main.c
50627@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50628 sizeof(wl->conf.mem));
50629
50630 /* read data preparation is only needed by wl127x */
50631- wl->ops->prepare_read = wl127x_prepare_read;
50632+ pax_open_kernel();
50633+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50634+ pax_close_kernel();
50635
50636 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50637 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50638@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50639 sizeof(wl->conf.mem));
50640
50641 /* read data preparation is only needed by wl127x */
50642- wl->ops->prepare_read = wl127x_prepare_read;
50643+ pax_open_kernel();
50644+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50645+ pax_close_kernel();
50646
50647 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50648 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50649diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50650index 7af1936..128bb35 100644
50651--- a/drivers/net/wireless/ti/wl18xx/main.c
50652+++ b/drivers/net/wireless/ti/wl18xx/main.c
50653@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50654 }
50655
50656 if (!checksum_param) {
50657- wl18xx_ops.set_rx_csum = NULL;
50658- wl18xx_ops.init_vif = NULL;
50659+ pax_open_kernel();
50660+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50661+ *(void **)&wl18xx_ops.init_vif = NULL;
50662+ pax_close_kernel();
50663 }
50664
50665 /* Enable 11a Band only if we have 5G antennas */
50666diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50667index a912dc0..a8225ba 100644
50668--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50669+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50670@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50671 {
50672 struct zd_usb *usb = urb->context;
50673 struct zd_usb_interrupt *intr = &usb->intr;
50674- int len;
50675+ unsigned int len;
50676 u16 int_num;
50677
50678 ZD_ASSERT(in_interrupt());
50679diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50680index 683671a..4519fc2 100644
50681--- a/drivers/nfc/nfcwilink.c
50682+++ b/drivers/nfc/nfcwilink.c
50683@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50684
50685 static int nfcwilink_probe(struct platform_device *pdev)
50686 {
50687- static struct nfcwilink *drv;
50688+ struct nfcwilink *drv;
50689 int rc;
50690 __u32 protocols;
50691
50692diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50693index d93b2b6..ae50401 100644
50694--- a/drivers/oprofile/buffer_sync.c
50695+++ b/drivers/oprofile/buffer_sync.c
50696@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50697 if (cookie == NO_COOKIE)
50698 offset = pc;
50699 if (cookie == INVALID_COOKIE) {
50700- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50701+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50702 offset = pc;
50703 }
50704 if (cookie != last_cookie) {
50705@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50706 /* add userspace sample */
50707
50708 if (!mm) {
50709- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50710+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50711 return 0;
50712 }
50713
50714 cookie = lookup_dcookie(mm, s->eip, &offset);
50715
50716 if (cookie == INVALID_COOKIE) {
50717- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50718+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50719 return 0;
50720 }
50721
50722@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50723 /* ignore backtraces if failed to add a sample */
50724 if (state == sb_bt_start) {
50725 state = sb_bt_ignore;
50726- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50727+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50728 }
50729 }
50730 release_mm(mm);
50731diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50732index c0cc4e7..44d4e54 100644
50733--- a/drivers/oprofile/event_buffer.c
50734+++ b/drivers/oprofile/event_buffer.c
50735@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50736 }
50737
50738 if (buffer_pos == buffer_size) {
50739- atomic_inc(&oprofile_stats.event_lost_overflow);
50740+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50741 return;
50742 }
50743
50744diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50745index ed2c3ec..deda85a 100644
50746--- a/drivers/oprofile/oprof.c
50747+++ b/drivers/oprofile/oprof.c
50748@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50749 if (oprofile_ops.switch_events())
50750 return;
50751
50752- atomic_inc(&oprofile_stats.multiplex_counter);
50753+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50754 start_switch_worker();
50755 }
50756
50757diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50758index ee2cfce..7f8f699 100644
50759--- a/drivers/oprofile/oprofile_files.c
50760+++ b/drivers/oprofile/oprofile_files.c
50761@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50762
50763 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50764
50765-static ssize_t timeout_read(struct file *file, char __user *buf,
50766+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50767 size_t count, loff_t *offset)
50768 {
50769 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50770diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50771index 59659ce..6c860a0 100644
50772--- a/drivers/oprofile/oprofile_stats.c
50773+++ b/drivers/oprofile/oprofile_stats.c
50774@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50775 cpu_buf->sample_invalid_eip = 0;
50776 }
50777
50778- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50779- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50780- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50781- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50782- atomic_set(&oprofile_stats.multiplex_counter, 0);
50783+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50784+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50785+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50786+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50787+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50788 }
50789
50790
50791diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50792index 1fc622b..8c48fc3 100644
50793--- a/drivers/oprofile/oprofile_stats.h
50794+++ b/drivers/oprofile/oprofile_stats.h
50795@@ -13,11 +13,11 @@
50796 #include <linux/atomic.h>
50797
50798 struct oprofile_stat_struct {
50799- atomic_t sample_lost_no_mm;
50800- atomic_t sample_lost_no_mapping;
50801- atomic_t bt_lost_no_mapping;
50802- atomic_t event_lost_overflow;
50803- atomic_t multiplex_counter;
50804+ atomic_unchecked_t sample_lost_no_mm;
50805+ atomic_unchecked_t sample_lost_no_mapping;
50806+ atomic_unchecked_t bt_lost_no_mapping;
50807+ atomic_unchecked_t event_lost_overflow;
50808+ atomic_unchecked_t multiplex_counter;
50809 };
50810
50811 extern struct oprofile_stat_struct oprofile_stats;
50812diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50813index 3f49345..c750d0b 100644
50814--- a/drivers/oprofile/oprofilefs.c
50815+++ b/drivers/oprofile/oprofilefs.c
50816@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50817
50818 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50819 {
50820- atomic_t *val = file->private_data;
50821- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50822+ atomic_unchecked_t *val = file->private_data;
50823+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50824 }
50825
50826
50827@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50828
50829
50830 int oprofilefs_create_ro_atomic(struct dentry *root,
50831- char const *name, atomic_t *val)
50832+ char const *name, atomic_unchecked_t *val)
50833 {
50834 return __oprofilefs_create_file(root, name,
50835 &atomic_ro_fops, 0444, val);
50836diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50837index 61be1d9..dec05d7 100644
50838--- a/drivers/oprofile/timer_int.c
50839+++ b/drivers/oprofile/timer_int.c
50840@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50841 return NOTIFY_OK;
50842 }
50843
50844-static struct notifier_block __refdata oprofile_cpu_notifier = {
50845+static struct notifier_block oprofile_cpu_notifier = {
50846 .notifier_call = oprofile_cpu_notify,
50847 };
50848
50849diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50850index 3b47080..6cd05dd 100644
50851--- a/drivers/parport/procfs.c
50852+++ b/drivers/parport/procfs.c
50853@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50854
50855 *ppos += len;
50856
50857- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50858+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50859 }
50860
50861 #ifdef CONFIG_PARPORT_1284
50862@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50863
50864 *ppos += len;
50865
50866- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50867+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50868 }
50869 #endif /* IEEE1284.3 support. */
50870
50871diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50872index 8dcccff..35d701d 100644
50873--- a/drivers/pci/hotplug/acpiphp_ibm.c
50874+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50875@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50876 goto init_cleanup;
50877 }
50878
50879- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50880+ pax_open_kernel();
50881+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50882+ pax_close_kernel();
50883 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50884
50885 return retval;
50886diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50887index 04fcd78..39e83f1 100644
50888--- a/drivers/pci/hotplug/cpcihp_generic.c
50889+++ b/drivers/pci/hotplug/cpcihp_generic.c
50890@@ -73,7 +73,6 @@ static u16 port;
50891 static unsigned int enum_bit;
50892 static u8 enum_mask;
50893
50894-static struct cpci_hp_controller_ops generic_hpc_ops;
50895 static struct cpci_hp_controller generic_hpc;
50896
50897 static int __init validate_parameters(void)
50898@@ -139,6 +138,10 @@ static int query_enum(void)
50899 return ((value & enum_mask) == enum_mask);
50900 }
50901
50902+static struct cpci_hp_controller_ops generic_hpc_ops = {
50903+ .query_enum = query_enum,
50904+};
50905+
50906 static int __init cpcihp_generic_init(void)
50907 {
50908 int status;
50909@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50910 pci_dev_put(dev);
50911
50912 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50913- generic_hpc_ops.query_enum = query_enum;
50914 generic_hpc.ops = &generic_hpc_ops;
50915
50916 status = cpci_hp_register_controller(&generic_hpc);
50917diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50918index 6757b3e..d3bad62 100644
50919--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50920+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50921@@ -59,7 +59,6 @@
50922 /* local variables */
50923 static bool debug;
50924 static bool poll;
50925-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50926 static struct cpci_hp_controller zt5550_hpc;
50927
50928 /* Primary cPCI bus bridge device */
50929@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
50930 return 0;
50931 }
50932
50933+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50934+ .query_enum = zt5550_hc_query_enum,
50935+};
50936+
50937 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50938 {
50939 int status;
50940@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50941 dbg("returned from zt5550_hc_config");
50942
50943 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50944- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50945 zt5550_hpc.ops = &zt5550_hpc_ops;
50946 if(!poll) {
50947 zt5550_hpc.irq = hc_dev->irq;
50948 zt5550_hpc.irq_flags = IRQF_SHARED;
50949 zt5550_hpc.dev_id = hc_dev;
50950
50951- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50952- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50953- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50954+ pax_open_kernel();
50955+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50956+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50957+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50958+ pax_open_kernel();
50959 } else {
50960 info("using ENUM# polling mode");
50961 }
50962diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50963index 0968a9b..5a00edf 100644
50964--- a/drivers/pci/hotplug/cpqphp_nvram.c
50965+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50966@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
50967
50968 void compaq_nvram_init (void __iomem *rom_start)
50969 {
50970+
50971+#ifndef CONFIG_PAX_KERNEXEC
50972 if (rom_start) {
50973 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50974 }
50975+#endif
50976+
50977 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50978
50979 /* initialize our int15 lock */
50980diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50981index 56d8486..f26113f 100644
50982--- a/drivers/pci/hotplug/pci_hotplug_core.c
50983+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50984@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50985 return -EINVAL;
50986 }
50987
50988- slot->ops->owner = owner;
50989- slot->ops->mod_name = mod_name;
50990+ pax_open_kernel();
50991+ *(struct module **)&slot->ops->owner = owner;
50992+ *(const char **)&slot->ops->mod_name = mod_name;
50993+ pax_close_kernel();
50994
50995 mutex_lock(&pci_hp_mutex);
50996 /*
50997diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50998index 07aa722..84514b4 100644
50999--- a/drivers/pci/hotplug/pciehp_core.c
51000+++ b/drivers/pci/hotplug/pciehp_core.c
51001@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51002 struct slot *slot = ctrl->slot;
51003 struct hotplug_slot *hotplug = NULL;
51004 struct hotplug_slot_info *info = NULL;
51005- struct hotplug_slot_ops *ops = NULL;
51006+ hotplug_slot_ops_no_const *ops = NULL;
51007 char name[SLOT_NAME_SIZE];
51008 int retval = -ENOMEM;
51009
51010diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51011index 5a40516..136d5a7 100644
51012--- a/drivers/pci/msi.c
51013+++ b/drivers/pci/msi.c
51014@@ -507,8 +507,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51015 {
51016 struct attribute **msi_attrs;
51017 struct attribute *msi_attr;
51018- struct device_attribute *msi_dev_attr;
51019- struct attribute_group *msi_irq_group;
51020+ device_attribute_no_const *msi_dev_attr;
51021+ attribute_group_no_const *msi_irq_group;
51022 const struct attribute_group **msi_irq_groups;
51023 struct msi_desc *entry;
51024 int ret = -ENOMEM;
51025@@ -568,7 +568,7 @@ error_attrs:
51026 count = 0;
51027 msi_attr = msi_attrs[count];
51028 while (msi_attr) {
51029- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51030+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51031 kfree(msi_attr->name);
51032 kfree(msi_dev_attr);
51033 ++count;
51034diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51035index 76ef791..adc3bd1 100644
51036--- a/drivers/pci/pci-sysfs.c
51037+++ b/drivers/pci/pci-sysfs.c
51038@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51039 {
51040 /* allocate attribute structure, piggyback attribute name */
51041 int name_len = write_combine ? 13 : 10;
51042- struct bin_attribute *res_attr;
51043+ bin_attribute_no_const *res_attr;
51044 int retval;
51045
51046 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51047@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51048 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51049 {
51050 int retval;
51051- struct bin_attribute *attr;
51052+ bin_attribute_no_const *attr;
51053
51054 /* If the device has VPD, try to expose it in sysfs. */
51055 if (dev->vpd) {
51056@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51057 {
51058 int retval;
51059 int rom_size = 0;
51060- struct bin_attribute *attr;
51061+ bin_attribute_no_const *attr;
51062
51063 if (!sysfs_initialized)
51064 return -EACCES;
51065diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51066index 0601890..dc15007 100644
51067--- a/drivers/pci/pci.h
51068+++ b/drivers/pci/pci.h
51069@@ -91,7 +91,7 @@ struct pci_vpd_ops {
51070 struct pci_vpd {
51071 unsigned int len;
51072 const struct pci_vpd_ops *ops;
51073- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51074+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51075 };
51076
51077 int pci_vpd_pci22_init(struct pci_dev *dev);
51078diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51079index e1e7026..d28dd33 100644
51080--- a/drivers/pci/pcie/aspm.c
51081+++ b/drivers/pci/pcie/aspm.c
51082@@ -27,9 +27,9 @@
51083 #define MODULE_PARAM_PREFIX "pcie_aspm."
51084
51085 /* Note: those are not register definitions */
51086-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51087-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51088-#define ASPM_STATE_L1 (4) /* L1 state */
51089+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51090+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51091+#define ASPM_STATE_L1 (4U) /* L1 state */
51092 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51093 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51094
51095diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51096index 4170113..7cc5339 100644
51097--- a/drivers/pci/probe.c
51098+++ b/drivers/pci/probe.c
51099@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51100 struct pci_bus_region region, inverted_region;
51101 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
51102
51103- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51104+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51105
51106 /* No printks while decoding is disabled! */
51107 if (!dev->mmio_always_on) {
51108diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51109index 3f155e7..0f4b1f0 100644
51110--- a/drivers/pci/proc.c
51111+++ b/drivers/pci/proc.c
51112@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51113 static int __init pci_proc_init(void)
51114 {
51115 struct pci_dev *dev = NULL;
51116+
51117+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51118+#ifdef CONFIG_GRKERNSEC_PROC_USER
51119+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51120+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51121+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51122+#endif
51123+#else
51124 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51125+#endif
51126 proc_create("devices", 0, proc_bus_pci_dir,
51127 &proc_bus_pci_dev_operations);
51128 proc_initialized = 1;
51129diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
51130index d866db8..c827d1f 100644
51131--- a/drivers/platform/chrome/chromeos_laptop.c
51132+++ b/drivers/platform/chrome/chromeos_laptop.c
51133@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
51134 .callback = chromeos_laptop_dmi_matched, \
51135 .driver_data = (void *)&board_
51136
51137-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
51138+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
51139 {
51140 .ident = "Samsung Series 5 550",
51141 .matches = {
51142diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51143index c5af23b..3d62d5e 100644
51144--- a/drivers/platform/x86/alienware-wmi.c
51145+++ b/drivers/platform/x86/alienware-wmi.c
51146@@ -150,7 +150,7 @@ struct wmax_led_args {
51147 } __packed;
51148
51149 static struct platform_device *platform_device;
51150-static struct device_attribute *zone_dev_attrs;
51151+static device_attribute_no_const *zone_dev_attrs;
51152 static struct attribute **zone_attrs;
51153 static struct platform_zone *zone_data;
51154
51155@@ -161,7 +161,7 @@ static struct platform_driver platform_driver = {
51156 }
51157 };
51158
51159-static struct attribute_group zone_attribute_group = {
51160+static attribute_group_no_const zone_attribute_group = {
51161 .name = "rgb_zones",
51162 };
51163
51164diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51165index 21fc932..ee9394a 100644
51166--- a/drivers/platform/x86/asus-wmi.c
51167+++ b/drivers/platform/x86/asus-wmi.c
51168@@ -1590,6 +1590,10 @@ static int show_dsts(struct seq_file *m, void *data)
51169 int err;
51170 u32 retval = -1;
51171
51172+#ifdef CONFIG_GRKERNSEC_KMEM
51173+ return -EPERM;
51174+#endif
51175+
51176 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51177
51178 if (err < 0)
51179@@ -1606,6 +1610,10 @@ static int show_devs(struct seq_file *m, void *data)
51180 int err;
51181 u32 retval = -1;
51182
51183+#ifdef CONFIG_GRKERNSEC_KMEM
51184+ return -EPERM;
51185+#endif
51186+
51187 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51188 &retval);
51189
51190@@ -1630,6 +1638,10 @@ static int show_call(struct seq_file *m, void *data)
51191 union acpi_object *obj;
51192 acpi_status status;
51193
51194+#ifdef CONFIG_GRKERNSEC_KMEM
51195+ return -EPERM;
51196+#endif
51197+
51198 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51199 1, asus->debug.method_id,
51200 &input, &output);
51201diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51202index 62f8030..c7f2a45 100644
51203--- a/drivers/platform/x86/msi-laptop.c
51204+++ b/drivers/platform/x86/msi-laptop.c
51205@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51206
51207 if (!quirks->ec_read_only) {
51208 /* allow userland write sysfs file */
51209- dev_attr_bluetooth.store = store_bluetooth;
51210- dev_attr_wlan.store = store_wlan;
51211- dev_attr_threeg.store = store_threeg;
51212- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51213- dev_attr_wlan.attr.mode |= S_IWUSR;
51214- dev_attr_threeg.attr.mode |= S_IWUSR;
51215+ pax_open_kernel();
51216+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51217+ *(void **)&dev_attr_wlan.store = store_wlan;
51218+ *(void **)&dev_attr_threeg.store = store_threeg;
51219+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51220+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51221+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51222+ pax_close_kernel();
51223 }
51224
51225 /* disable hardware control by fn key */
51226diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51227index 70222f2..8c8ce66 100644
51228--- a/drivers/platform/x86/msi-wmi.c
51229+++ b/drivers/platform/x86/msi-wmi.c
51230@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51231 static void msi_wmi_notify(u32 value, void *context)
51232 {
51233 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51234- static struct key_entry *key;
51235+ struct key_entry *key;
51236 union acpi_object *obj;
51237 acpi_status status;
51238
51239diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51240index 26ad9ff..7c52909 100644
51241--- a/drivers/platform/x86/sony-laptop.c
51242+++ b/drivers/platform/x86/sony-laptop.c
51243@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51244 }
51245
51246 /* High speed charging function */
51247-static struct device_attribute *hsc_handle;
51248+static device_attribute_no_const *hsc_handle;
51249
51250 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51251 struct device_attribute *attr,
51252@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51253 }
51254
51255 /* low battery function */
51256-static struct device_attribute *lowbatt_handle;
51257+static device_attribute_no_const *lowbatt_handle;
51258
51259 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51260 struct device_attribute *attr,
51261@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51262 }
51263
51264 /* fan speed function */
51265-static struct device_attribute *fan_handle, *hsf_handle;
51266+static device_attribute_no_const *fan_handle, *hsf_handle;
51267
51268 static ssize_t sony_nc_hsfan_store(struct device *dev,
51269 struct device_attribute *attr,
51270@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51271 }
51272
51273 /* USB charge function */
51274-static struct device_attribute *uc_handle;
51275+static device_attribute_no_const *uc_handle;
51276
51277 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51278 struct device_attribute *attr,
51279@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51280 }
51281
51282 /* Panel ID function */
51283-static struct device_attribute *panel_handle;
51284+static device_attribute_no_const *panel_handle;
51285
51286 static ssize_t sony_nc_panelid_show(struct device *dev,
51287 struct device_attribute *attr, char *buffer)
51288@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51289 }
51290
51291 /* smart connect function */
51292-static struct device_attribute *sc_handle;
51293+static device_attribute_no_const *sc_handle;
51294
51295 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51296 struct device_attribute *attr,
51297diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51298index 3bbc6eb..7760460 100644
51299--- a/drivers/platform/x86/thinkpad_acpi.c
51300+++ b/drivers/platform/x86/thinkpad_acpi.c
51301@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
51302 return 0;
51303 }
51304
51305-void static hotkey_mask_warn_incomplete_mask(void)
51306+static void hotkey_mask_warn_incomplete_mask(void)
51307 {
51308 /* log only what the user can fix... */
51309 const u32 wantedmask = hotkey_driver_mask &
51310@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51311 && !tp_features.bright_unkfw)
51312 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51313 }
51314+}
51315
51316 #undef TPACPI_COMPARE_KEY
51317 #undef TPACPI_MAY_SEND_KEY
51318-}
51319
51320 /*
51321 * Polling driver
51322diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51323index 438d4c7..ca8a2fb 100644
51324--- a/drivers/pnp/pnpbios/bioscalls.c
51325+++ b/drivers/pnp/pnpbios/bioscalls.c
51326@@ -59,7 +59,7 @@ do { \
51327 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51328 } while(0)
51329
51330-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51331+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51332 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51333
51334 /*
51335@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51336
51337 cpu = get_cpu();
51338 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51339+
51340+ pax_open_kernel();
51341 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51342+ pax_close_kernel();
51343
51344 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51345 spin_lock_irqsave(&pnp_bios_lock, flags);
51346@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51347 :"memory");
51348 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51349
51350+ pax_open_kernel();
51351 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51352+ pax_close_kernel();
51353+
51354 put_cpu();
51355
51356 /* If we get here and this is set then the PnP BIOS faulted on us. */
51357@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51358 return status;
51359 }
51360
51361-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51362+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51363 {
51364 int i;
51365
51366@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51367 pnp_bios_callpoint.offset = header->fields.pm16offset;
51368 pnp_bios_callpoint.segment = PNP_CS16;
51369
51370+ pax_open_kernel();
51371+
51372 for_each_possible_cpu(i) {
51373 struct desc_struct *gdt = get_cpu_gdt_table(i);
51374 if (!gdt)
51375@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51376 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51377 (unsigned long)__va(header->fields.pm16dseg));
51378 }
51379+
51380+ pax_close_kernel();
51381 }
51382diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51383index 0c52e2a..3421ab7 100644
51384--- a/drivers/power/pda_power.c
51385+++ b/drivers/power/pda_power.c
51386@@ -37,7 +37,11 @@ static int polling;
51387
51388 #if IS_ENABLED(CONFIG_USB_PHY)
51389 static struct usb_phy *transceiver;
51390-static struct notifier_block otg_nb;
51391+static int otg_handle_notification(struct notifier_block *nb,
51392+ unsigned long event, void *unused);
51393+static struct notifier_block otg_nb = {
51394+ .notifier_call = otg_handle_notification
51395+};
51396 #endif
51397
51398 static struct regulator *ac_draw;
51399@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51400
51401 #if IS_ENABLED(CONFIG_USB_PHY)
51402 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51403- otg_nb.notifier_call = otg_handle_notification;
51404 ret = usb_register_notifier(transceiver, &otg_nb);
51405 if (ret) {
51406 dev_err(dev, "failure to register otg notifier\n");
51407diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51408index cc439fd..8fa30df 100644
51409--- a/drivers/power/power_supply.h
51410+++ b/drivers/power/power_supply.h
51411@@ -16,12 +16,12 @@ struct power_supply;
51412
51413 #ifdef CONFIG_SYSFS
51414
51415-extern void power_supply_init_attrs(struct device_type *dev_type);
51416+extern void power_supply_init_attrs(void);
51417 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51418
51419 #else
51420
51421-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51422+static inline void power_supply_init_attrs(void) {}
51423 #define power_supply_uevent NULL
51424
51425 #endif /* CONFIG_SYSFS */
51426diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51427index 078afd6..fbac9da 100644
51428--- a/drivers/power/power_supply_core.c
51429+++ b/drivers/power/power_supply_core.c
51430@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51431 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51432 EXPORT_SYMBOL_GPL(power_supply_notifier);
51433
51434-static struct device_type power_supply_dev_type;
51435+extern const struct attribute_group *power_supply_attr_groups[];
51436+static struct device_type power_supply_dev_type = {
51437+ .groups = power_supply_attr_groups,
51438+};
51439
51440 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51441 struct power_supply *supply)
51442@@ -640,7 +643,7 @@ static int __init power_supply_class_init(void)
51443 return PTR_ERR(power_supply_class);
51444
51445 power_supply_class->dev_uevent = power_supply_uevent;
51446- power_supply_init_attrs(&power_supply_dev_type);
51447+ power_supply_init_attrs();
51448
51449 return 0;
51450 }
51451diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51452index 750a202..99c8f4b 100644
51453--- a/drivers/power/power_supply_sysfs.c
51454+++ b/drivers/power/power_supply_sysfs.c
51455@@ -234,17 +234,15 @@ static struct attribute_group power_supply_attr_group = {
51456 .is_visible = power_supply_attr_is_visible,
51457 };
51458
51459-static const struct attribute_group *power_supply_attr_groups[] = {
51460+const struct attribute_group *power_supply_attr_groups[] = {
51461 &power_supply_attr_group,
51462 NULL,
51463 };
51464
51465-void power_supply_init_attrs(struct device_type *dev_type)
51466+void power_supply_init_attrs(void)
51467 {
51468 int i;
51469
51470- dev_type->groups = power_supply_attr_groups;
51471-
51472 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51473 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51474 }
51475diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51476index 84419af..268ede8 100644
51477--- a/drivers/powercap/powercap_sys.c
51478+++ b/drivers/powercap/powercap_sys.c
51479@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51480 struct device_attribute name_attr;
51481 };
51482
51483+static ssize_t show_constraint_name(struct device *dev,
51484+ struct device_attribute *dev_attr,
51485+ char *buf);
51486+
51487 static struct powercap_constraint_attr
51488- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51489+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51490+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51491+ .power_limit_attr = {
51492+ .attr = {
51493+ .name = NULL,
51494+ .mode = S_IWUSR | S_IRUGO
51495+ },
51496+ .show = show_constraint_power_limit_uw,
51497+ .store = store_constraint_power_limit_uw
51498+ },
51499+
51500+ .time_window_attr = {
51501+ .attr = {
51502+ .name = NULL,
51503+ .mode = S_IWUSR | S_IRUGO
51504+ },
51505+ .show = show_constraint_time_window_us,
51506+ .store = store_constraint_time_window_us
51507+ },
51508+
51509+ .max_power_attr = {
51510+ .attr = {
51511+ .name = NULL,
51512+ .mode = S_IRUGO
51513+ },
51514+ .show = show_constraint_max_power_uw,
51515+ .store = NULL
51516+ },
51517+
51518+ .min_power_attr = {
51519+ .attr = {
51520+ .name = NULL,
51521+ .mode = S_IRUGO
51522+ },
51523+ .show = show_constraint_min_power_uw,
51524+ .store = NULL
51525+ },
51526+
51527+ .max_time_window_attr = {
51528+ .attr = {
51529+ .name = NULL,
51530+ .mode = S_IRUGO
51531+ },
51532+ .show = show_constraint_max_time_window_us,
51533+ .store = NULL
51534+ },
51535+
51536+ .min_time_window_attr = {
51537+ .attr = {
51538+ .name = NULL,
51539+ .mode = S_IRUGO
51540+ },
51541+ .show = show_constraint_min_time_window_us,
51542+ .store = NULL
51543+ },
51544+
51545+ .name_attr = {
51546+ .attr = {
51547+ .name = NULL,
51548+ .mode = S_IRUGO
51549+ },
51550+ .show = show_constraint_name,
51551+ .store = NULL
51552+ }
51553+ }
51554+};
51555
51556 /* A list of powercap control_types */
51557 static LIST_HEAD(powercap_cntrl_list);
51558@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51559 }
51560
51561 static int create_constraint_attribute(int id, const char *name,
51562- int mode,
51563- struct device_attribute *dev_attr,
51564- ssize_t (*show)(struct device *,
51565- struct device_attribute *, char *),
51566- ssize_t (*store)(struct device *,
51567- struct device_attribute *,
51568- const char *, size_t)
51569- )
51570+ struct device_attribute *dev_attr)
51571 {
51572+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51573
51574- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51575- id, name);
51576- if (!dev_attr->attr.name)
51577+ if (!name)
51578 return -ENOMEM;
51579- dev_attr->attr.mode = mode;
51580- dev_attr->show = show;
51581- dev_attr->store = store;
51582+
51583+ pax_open_kernel();
51584+ *(const char **)&dev_attr->attr.name = name;
51585+ pax_close_kernel();
51586
51587 return 0;
51588 }
51589@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51590
51591 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51592 ret = create_constraint_attribute(i, "power_limit_uw",
51593- S_IWUSR | S_IRUGO,
51594- &constraint_attrs[i].power_limit_attr,
51595- show_constraint_power_limit_uw,
51596- store_constraint_power_limit_uw);
51597+ &constraint_attrs[i].power_limit_attr);
51598 if (ret)
51599 goto err_alloc;
51600 ret = create_constraint_attribute(i, "time_window_us",
51601- S_IWUSR | S_IRUGO,
51602- &constraint_attrs[i].time_window_attr,
51603- show_constraint_time_window_us,
51604- store_constraint_time_window_us);
51605+ &constraint_attrs[i].time_window_attr);
51606 if (ret)
51607 goto err_alloc;
51608- ret = create_constraint_attribute(i, "name", S_IRUGO,
51609- &constraint_attrs[i].name_attr,
51610- show_constraint_name,
51611- NULL);
51612+ ret = create_constraint_attribute(i, "name",
51613+ &constraint_attrs[i].name_attr);
51614 if (ret)
51615 goto err_alloc;
51616- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51617- &constraint_attrs[i].max_power_attr,
51618- show_constraint_max_power_uw,
51619- NULL);
51620+ ret = create_constraint_attribute(i, "max_power_uw",
51621+ &constraint_attrs[i].max_power_attr);
51622 if (ret)
51623 goto err_alloc;
51624- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51625- &constraint_attrs[i].min_power_attr,
51626- show_constraint_min_power_uw,
51627- NULL);
51628+ ret = create_constraint_attribute(i, "min_power_uw",
51629+ &constraint_attrs[i].min_power_attr);
51630 if (ret)
51631 goto err_alloc;
51632 ret = create_constraint_attribute(i, "max_time_window_us",
51633- S_IRUGO,
51634- &constraint_attrs[i].max_time_window_attr,
51635- show_constraint_max_time_window_us,
51636- NULL);
51637+ &constraint_attrs[i].max_time_window_attr);
51638 if (ret)
51639 goto err_alloc;
51640 ret = create_constraint_attribute(i, "min_time_window_us",
51641- S_IRUGO,
51642- &constraint_attrs[i].min_time_window_attr,
51643- show_constraint_min_time_window_us,
51644- NULL);
51645+ &constraint_attrs[i].min_time_window_attr);
51646 if (ret)
51647 goto err_alloc;
51648
51649@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51650 power_zone->zone_dev_attrs[count++] =
51651 &dev_attr_max_energy_range_uj.attr;
51652 if (power_zone->ops->get_energy_uj) {
51653+ pax_open_kernel();
51654 if (power_zone->ops->reset_energy_uj)
51655- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51656+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51657 else
51658- dev_attr_energy_uj.attr.mode = S_IRUGO;
51659+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51660+ pax_close_kernel();
51661 power_zone->zone_dev_attrs[count++] =
51662 &dev_attr_energy_uj.attr;
51663 }
51664diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51665index 9c5d414..c7900ce 100644
51666--- a/drivers/ptp/ptp_private.h
51667+++ b/drivers/ptp/ptp_private.h
51668@@ -51,7 +51,7 @@ struct ptp_clock {
51669 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51670 wait_queue_head_t tsev_wq;
51671 int defunct; /* tells readers to go away when clock is being removed */
51672- struct device_attribute *pin_dev_attr;
51673+ device_attribute_no_const *pin_dev_attr;
51674 struct attribute **pin_attr;
51675 struct attribute_group pin_attr_group;
51676 };
51677diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51678index 302e626..12579af 100644
51679--- a/drivers/ptp/ptp_sysfs.c
51680+++ b/drivers/ptp/ptp_sysfs.c
51681@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51682 goto no_pin_attr;
51683
51684 for (i = 0; i < n_pins; i++) {
51685- struct device_attribute *da = &ptp->pin_dev_attr[i];
51686+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51687 sysfs_attr_init(&da->attr);
51688 da->attr.name = info->pin_config[i].name;
51689 da->attr.mode = 0644;
51690diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51691index a3c3785..c901e3a 100644
51692--- a/drivers/regulator/core.c
51693+++ b/drivers/regulator/core.c
51694@@ -3481,7 +3481,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51695 {
51696 const struct regulation_constraints *constraints = NULL;
51697 const struct regulator_init_data *init_data;
51698- static atomic_t regulator_no = ATOMIC_INIT(0);
51699+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
51700 struct regulator_dev *rdev;
51701 struct device *dev;
51702 int ret, i;
51703@@ -3551,7 +3551,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51704 rdev->dev.of_node = of_node_get(config->of_node);
51705 rdev->dev.parent = dev;
51706 dev_set_name(&rdev->dev, "regulator.%d",
51707- atomic_inc_return(&regulator_no) - 1);
51708+ atomic_inc_return_unchecked(&regulator_no) - 1);
51709 ret = device_register(&rdev->dev);
51710 if (ret != 0) {
51711 put_device(&rdev->dev);
51712diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51713index 2fc4111..6aa88ca 100644
51714--- a/drivers/regulator/max8660.c
51715+++ b/drivers/regulator/max8660.c
51716@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51717 max8660->shadow_regs[MAX8660_OVER1] = 5;
51718 } else {
51719 /* Otherwise devices can be toggled via software */
51720- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51721- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51722+ pax_open_kernel();
51723+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51724+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51725+ pax_close_kernel();
51726 }
51727
51728 /*
51729diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51730index dbedf17..18ff6b7 100644
51731--- a/drivers/regulator/max8973-regulator.c
51732+++ b/drivers/regulator/max8973-regulator.c
51733@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51734 if (!pdata || !pdata->enable_ext_control) {
51735 max->desc.enable_reg = MAX8973_VOUT;
51736 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51737- max->ops.enable = regulator_enable_regmap;
51738- max->ops.disable = regulator_disable_regmap;
51739- max->ops.is_enabled = regulator_is_enabled_regmap;
51740+ pax_open_kernel();
51741+ *(void **)&max->ops.enable = regulator_enable_regmap;
51742+ *(void **)&max->ops.disable = regulator_disable_regmap;
51743+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51744+ pax_close_kernel();
51745 }
51746
51747 if (pdata) {
51748diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51749index f374fa5..26f0683 100644
51750--- a/drivers/regulator/mc13892-regulator.c
51751+++ b/drivers/regulator/mc13892-regulator.c
51752@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51753 }
51754 mc13xxx_unlock(mc13892);
51755
51756- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51757+ pax_open_kernel();
51758+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
51759 = mc13892_vcam_set_mode;
51760- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51761+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
51762 = mc13892_vcam_get_mode;
51763+ pax_close_kernel();
51764
51765 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51766 ARRAY_SIZE(mc13892_regulators));
51767diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51768index 5b2e761..c8c8a4a 100644
51769--- a/drivers/rtc/rtc-cmos.c
51770+++ b/drivers/rtc/rtc-cmos.c
51771@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51772 hpet_rtc_timer_init();
51773
51774 /* export at least the first block of NVRAM */
51775- nvram.size = address_space - NVRAM_OFFSET;
51776+ pax_open_kernel();
51777+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51778+ pax_close_kernel();
51779 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51780 if (retval < 0) {
51781 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51782diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51783index d049393..bb20be0 100644
51784--- a/drivers/rtc/rtc-dev.c
51785+++ b/drivers/rtc/rtc-dev.c
51786@@ -16,6 +16,7 @@
51787 #include <linux/module.h>
51788 #include <linux/rtc.h>
51789 #include <linux/sched.h>
51790+#include <linux/grsecurity.h>
51791 #include "rtc-core.h"
51792
51793 static dev_t rtc_devt;
51794@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51795 if (copy_from_user(&tm, uarg, sizeof(tm)))
51796 return -EFAULT;
51797
51798+ gr_log_timechange();
51799+
51800 return rtc_set_time(rtc, &tm);
51801
51802 case RTC_PIE_ON:
51803diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51804index f03d5ba..8325bf6 100644
51805--- a/drivers/rtc/rtc-ds1307.c
51806+++ b/drivers/rtc/rtc-ds1307.c
51807@@ -107,7 +107,7 @@ struct ds1307 {
51808 u8 offset; /* register's offset */
51809 u8 regs[11];
51810 u16 nvram_offset;
51811- struct bin_attribute *nvram;
51812+ bin_attribute_no_const *nvram;
51813 enum ds_type type;
51814 unsigned long flags;
51815 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51816diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51817index 11880c1..b823aa4 100644
51818--- a/drivers/rtc/rtc-m48t59.c
51819+++ b/drivers/rtc/rtc-m48t59.c
51820@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51821 if (IS_ERR(m48t59->rtc))
51822 return PTR_ERR(m48t59->rtc);
51823
51824- m48t59_nvram_attr.size = pdata->offset;
51825+ pax_open_kernel();
51826+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51827+ pax_close_kernel();
51828
51829 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51830 if (ret)
51831diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51832index e693af6..2e525b6 100644
51833--- a/drivers/scsi/bfa/bfa_fcpim.h
51834+++ b/drivers/scsi/bfa/bfa_fcpim.h
51835@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51836
51837 struct bfa_itn_s {
51838 bfa_isr_func_t isr;
51839-};
51840+} __no_const;
51841
51842 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51843 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51844diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51845index 0f19455..ef7adb5 100644
51846--- a/drivers/scsi/bfa/bfa_fcs.c
51847+++ b/drivers/scsi/bfa/bfa_fcs.c
51848@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51849 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51850
51851 static struct bfa_fcs_mod_s fcs_modules[] = {
51852- { bfa_fcs_port_attach, NULL, NULL },
51853- { bfa_fcs_uf_attach, NULL, NULL },
51854- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51855- bfa_fcs_fabric_modexit },
51856+ {
51857+ .attach = bfa_fcs_port_attach,
51858+ .modinit = NULL,
51859+ .modexit = NULL
51860+ },
51861+ {
51862+ .attach = bfa_fcs_uf_attach,
51863+ .modinit = NULL,
51864+ .modexit = NULL
51865+ },
51866+ {
51867+ .attach = bfa_fcs_fabric_attach,
51868+ .modinit = bfa_fcs_fabric_modinit,
51869+ .modexit = bfa_fcs_fabric_modexit
51870+ },
51871 };
51872
51873 /*
51874diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51875index ff75ef8..2dfe00a 100644
51876--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51877+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51878@@ -89,15 +89,26 @@ static struct {
51879 void (*offline) (struct bfa_fcs_lport_s *port);
51880 } __port_action[] = {
51881 {
51882- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51883- bfa_fcs_lport_unknown_offline}, {
51884- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51885- bfa_fcs_lport_fab_offline}, {
51886- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51887- bfa_fcs_lport_n2n_offline}, {
51888- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51889- bfa_fcs_lport_loop_offline},
51890- };
51891+ .init = bfa_fcs_lport_unknown_init,
51892+ .online = bfa_fcs_lport_unknown_online,
51893+ .offline = bfa_fcs_lport_unknown_offline
51894+ },
51895+ {
51896+ .init = bfa_fcs_lport_fab_init,
51897+ .online = bfa_fcs_lport_fab_online,
51898+ .offline = bfa_fcs_lport_fab_offline
51899+ },
51900+ {
51901+ .init = bfa_fcs_lport_n2n_init,
51902+ .online = bfa_fcs_lport_n2n_online,
51903+ .offline = bfa_fcs_lport_n2n_offline
51904+ },
51905+ {
51906+ .init = bfa_fcs_lport_loop_init,
51907+ .online = bfa_fcs_lport_loop_online,
51908+ .offline = bfa_fcs_lport_loop_offline
51909+ },
51910+};
51911
51912 /*
51913 * fcs_port_sm FCS logical port state machine
51914diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51915index a38aafa0..fe8f03b 100644
51916--- a/drivers/scsi/bfa/bfa_ioc.h
51917+++ b/drivers/scsi/bfa/bfa_ioc.h
51918@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51919 bfa_ioc_disable_cbfn_t disable_cbfn;
51920 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51921 bfa_ioc_reset_cbfn_t reset_cbfn;
51922-};
51923+} __no_const;
51924
51925 /*
51926 * IOC event notification mechanism.
51927@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51928 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51929 enum bfi_ioc_state fwstate);
51930 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51931-};
51932+} __no_const;
51933
51934 /*
51935 * Queue element to wait for room in request queue. FIFO order is
51936diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51937index a14c784..6de6790 100644
51938--- a/drivers/scsi/bfa/bfa_modules.h
51939+++ b/drivers/scsi/bfa/bfa_modules.h
51940@@ -78,12 +78,12 @@ enum {
51941 \
51942 extern struct bfa_module_s hal_mod_ ## __mod; \
51943 struct bfa_module_s hal_mod_ ## __mod = { \
51944- bfa_ ## __mod ## _meminfo, \
51945- bfa_ ## __mod ## _attach, \
51946- bfa_ ## __mod ## _detach, \
51947- bfa_ ## __mod ## _start, \
51948- bfa_ ## __mod ## _stop, \
51949- bfa_ ## __mod ## _iocdisable, \
51950+ .meminfo = bfa_ ## __mod ## _meminfo, \
51951+ .attach = bfa_ ## __mod ## _attach, \
51952+ .detach = bfa_ ## __mod ## _detach, \
51953+ .start = bfa_ ## __mod ## _start, \
51954+ .stop = bfa_ ## __mod ## _stop, \
51955+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51956 }
51957
51958 #define BFA_CACHELINE_SZ (256)
51959diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51960index 045c4e1..13de803 100644
51961--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51962+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51963@@ -33,8 +33,8 @@
51964 */
51965 #include "libfcoe.h"
51966
51967-static atomic_t ctlr_num;
51968-static atomic_t fcf_num;
51969+static atomic_unchecked_t ctlr_num;
51970+static atomic_unchecked_t fcf_num;
51971
51972 /*
51973 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51974@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51975 if (!ctlr)
51976 goto out;
51977
51978- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51979+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51980 ctlr->f = f;
51981 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51982 INIT_LIST_HEAD(&ctlr->fcfs);
51983@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51984 fcf->dev.parent = &ctlr->dev;
51985 fcf->dev.bus = &fcoe_bus_type;
51986 fcf->dev.type = &fcoe_fcf_device_type;
51987- fcf->id = atomic_inc_return(&fcf_num) - 1;
51988+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51989 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51990
51991 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51992@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51993 {
51994 int error;
51995
51996- atomic_set(&ctlr_num, 0);
51997- atomic_set(&fcf_num, 0);
51998+ atomic_set_unchecked(&ctlr_num, 0);
51999+ atomic_set_unchecked(&fcf_num, 0);
52000
52001 error = bus_register(&fcoe_bus_type);
52002 if (error)
52003diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52004index 6de80e3..a11e0ac 100644
52005--- a/drivers/scsi/hosts.c
52006+++ b/drivers/scsi/hosts.c
52007@@ -42,7 +42,7 @@
52008 #include "scsi_logging.h"
52009
52010
52011-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52012+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52013
52014
52015 static void scsi_host_cls_release(struct device *dev)
52016@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52017 * subtract one because we increment first then return, but we need to
52018 * know what the next host number was before increment
52019 */
52020- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52021+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52022 shost->dma_channel = 0xff;
52023
52024 /* These three are default values which can be overridden */
52025diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52026index 6b35d0d..2880305 100644
52027--- a/drivers/scsi/hpsa.c
52028+++ b/drivers/scsi/hpsa.c
52029@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52030 unsigned long flags;
52031
52032 if (h->transMethod & CFGTBL_Trans_io_accel1)
52033- return h->access.command_completed(h, q);
52034+ return h->access->command_completed(h, q);
52035
52036 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52037- return h->access.command_completed(h, q);
52038+ return h->access->command_completed(h, q);
52039
52040 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52041 a = rq->head[rq->current_entry];
52042@@ -5454,7 +5454,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52043 while (!list_empty(&h->reqQ)) {
52044 c = list_entry(h->reqQ.next, struct CommandList, list);
52045 /* can't do anything if fifo is full */
52046- if ((h->access.fifo_full(h))) {
52047+ if ((h->access->fifo_full(h))) {
52048 h->fifo_recently_full = 1;
52049 dev_warn(&h->pdev->dev, "fifo full\n");
52050 break;
52051@@ -5476,7 +5476,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
52052
52053 /* Tell the controller execute command */
52054 spin_unlock_irqrestore(&h->lock, *flags);
52055- h->access.submit_command(h, c);
52056+ h->access->submit_command(h, c);
52057 spin_lock_irqsave(&h->lock, *flags);
52058 }
52059 }
52060@@ -5492,17 +5492,17 @@ static void lock_and_start_io(struct ctlr_info *h)
52061
52062 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52063 {
52064- return h->access.command_completed(h, q);
52065+ return h->access->command_completed(h, q);
52066 }
52067
52068 static inline bool interrupt_pending(struct ctlr_info *h)
52069 {
52070- return h->access.intr_pending(h);
52071+ return h->access->intr_pending(h);
52072 }
52073
52074 static inline long interrupt_not_for_us(struct ctlr_info *h)
52075 {
52076- return (h->access.intr_pending(h) == 0) ||
52077+ return (h->access->intr_pending(h) == 0) ||
52078 (h->interrupts_enabled == 0);
52079 }
52080
52081@@ -6458,7 +6458,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52082 if (prod_index < 0)
52083 return -ENODEV;
52084 h->product_name = products[prod_index].product_name;
52085- h->access = *(products[prod_index].access);
52086+ h->access = products[prod_index].access;
52087
52088 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52089 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52090@@ -6780,7 +6780,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52091 unsigned long flags;
52092 u32 lockup_detected;
52093
52094- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52095+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52096 spin_lock_irqsave(&h->lock, flags);
52097 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52098 if (!lockup_detected) {
52099@@ -7027,7 +7027,7 @@ reinit_after_soft_reset:
52100 }
52101
52102 /* make sure the board interrupts are off */
52103- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52104+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52105
52106 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52107 goto clean2;
52108@@ -7062,7 +7062,7 @@ reinit_after_soft_reset:
52109 * fake ones to scoop up any residual completions.
52110 */
52111 spin_lock_irqsave(&h->lock, flags);
52112- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52113+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52114 spin_unlock_irqrestore(&h->lock, flags);
52115 free_irqs(h);
52116 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
52117@@ -7081,9 +7081,9 @@ reinit_after_soft_reset:
52118 dev_info(&h->pdev->dev, "Board READY.\n");
52119 dev_info(&h->pdev->dev,
52120 "Waiting for stale completions to drain.\n");
52121- h->access.set_intr_mask(h, HPSA_INTR_ON);
52122+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52123 msleep(10000);
52124- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52125+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52126
52127 rc = controller_reset_failed(h->cfgtable);
52128 if (rc)
52129@@ -7109,7 +7109,7 @@ reinit_after_soft_reset:
52130 h->drv_req_rescan = 0;
52131
52132 /* Turn the interrupts on so we can service requests */
52133- h->access.set_intr_mask(h, HPSA_INTR_ON);
52134+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52135
52136 hpsa_hba_inquiry(h);
52137 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52138@@ -7174,7 +7174,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52139 * To write all data in the battery backed cache to disks
52140 */
52141 hpsa_flush_cache(h);
52142- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52143+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52144 hpsa_free_irqs_and_disable_msix(h);
52145 }
52146
52147@@ -7292,7 +7292,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52148 CFGTBL_Trans_enable_directed_msix |
52149 (trans_support & (CFGTBL_Trans_io_accel1 |
52150 CFGTBL_Trans_io_accel2));
52151- struct access_method access = SA5_performant_access;
52152+ struct access_method *access = &SA5_performant_access;
52153
52154 /* This is a bit complicated. There are 8 registers on
52155 * the controller which we write to to tell it 8 different
52156@@ -7334,7 +7334,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52157 * perform the superfluous readl() after each command submission.
52158 */
52159 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52160- access = SA5_performant_access_no_read;
52161+ access = &SA5_performant_access_no_read;
52162
52163 /* Controller spec: zero out this buffer. */
52164 for (i = 0; i < h->nreply_queues; i++)
52165@@ -7364,12 +7364,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52166 * enable outbound interrupt coalescing in accelerator mode;
52167 */
52168 if (trans_support & CFGTBL_Trans_io_accel1) {
52169- access = SA5_ioaccel_mode1_access;
52170+ access = &SA5_ioaccel_mode1_access;
52171 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52172 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52173 } else {
52174 if (trans_support & CFGTBL_Trans_io_accel2) {
52175- access = SA5_ioaccel_mode2_access;
52176+ access = &SA5_ioaccel_mode2_access;
52177 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52178 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52179 }
52180diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52181index 24472ce..8782caf 100644
52182--- a/drivers/scsi/hpsa.h
52183+++ b/drivers/scsi/hpsa.h
52184@@ -127,7 +127,7 @@ struct ctlr_info {
52185 unsigned int msix_vector;
52186 unsigned int msi_vector;
52187 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52188- struct access_method access;
52189+ struct access_method *access;
52190 char hba_mode_enabled;
52191
52192 /* queue and queue Info */
52193@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52194 }
52195
52196 static struct access_method SA5_access = {
52197- SA5_submit_command,
52198- SA5_intr_mask,
52199- SA5_fifo_full,
52200- SA5_intr_pending,
52201- SA5_completed,
52202+ .submit_command = SA5_submit_command,
52203+ .set_intr_mask = SA5_intr_mask,
52204+ .fifo_full = SA5_fifo_full,
52205+ .intr_pending = SA5_intr_pending,
52206+ .command_completed = SA5_completed,
52207 };
52208
52209 static struct access_method SA5_ioaccel_mode1_access = {
52210- SA5_submit_command,
52211- SA5_performant_intr_mask,
52212- SA5_fifo_full,
52213- SA5_ioaccel_mode1_intr_pending,
52214- SA5_ioaccel_mode1_completed,
52215+ .submit_command = SA5_submit_command,
52216+ .set_intr_mask = SA5_performant_intr_mask,
52217+ .fifo_full = SA5_fifo_full,
52218+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52219+ .command_completed = SA5_ioaccel_mode1_completed,
52220 };
52221
52222 static struct access_method SA5_ioaccel_mode2_access = {
52223- SA5_submit_command_ioaccel2,
52224- SA5_performant_intr_mask,
52225- SA5_fifo_full,
52226- SA5_performant_intr_pending,
52227- SA5_performant_completed,
52228+ .submit_command = SA5_submit_command_ioaccel2,
52229+ .set_intr_mask = SA5_performant_intr_mask,
52230+ .fifo_full = SA5_fifo_full,
52231+ .intr_pending = SA5_performant_intr_pending,
52232+ .command_completed = SA5_performant_completed,
52233 };
52234
52235 static struct access_method SA5_performant_access = {
52236- SA5_submit_command,
52237- SA5_performant_intr_mask,
52238- SA5_fifo_full,
52239- SA5_performant_intr_pending,
52240- SA5_performant_completed,
52241+ .submit_command = SA5_submit_command,
52242+ .set_intr_mask = SA5_performant_intr_mask,
52243+ .fifo_full = SA5_fifo_full,
52244+ .intr_pending = SA5_performant_intr_pending,
52245+ .command_completed = SA5_performant_completed,
52246 };
52247
52248 static struct access_method SA5_performant_access_no_read = {
52249- SA5_submit_command_no_read,
52250- SA5_performant_intr_mask,
52251- SA5_fifo_full,
52252- SA5_performant_intr_pending,
52253- SA5_performant_completed,
52254+ .submit_command = SA5_submit_command_no_read,
52255+ .set_intr_mask = SA5_performant_intr_mask,
52256+ .fifo_full = SA5_fifo_full,
52257+ .intr_pending = SA5_performant_intr_pending,
52258+ .command_completed = SA5_performant_completed,
52259 };
52260
52261 struct board_type {
52262diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52263index 1b3a094..068e683 100644
52264--- a/drivers/scsi/libfc/fc_exch.c
52265+++ b/drivers/scsi/libfc/fc_exch.c
52266@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52267 u16 pool_max_index;
52268
52269 struct {
52270- atomic_t no_free_exch;
52271- atomic_t no_free_exch_xid;
52272- atomic_t xid_not_found;
52273- atomic_t xid_busy;
52274- atomic_t seq_not_found;
52275- atomic_t non_bls_resp;
52276+ atomic_unchecked_t no_free_exch;
52277+ atomic_unchecked_t no_free_exch_xid;
52278+ atomic_unchecked_t xid_not_found;
52279+ atomic_unchecked_t xid_busy;
52280+ atomic_unchecked_t seq_not_found;
52281+ atomic_unchecked_t non_bls_resp;
52282 } stats;
52283 };
52284
52285@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52286 /* allocate memory for exchange */
52287 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52288 if (!ep) {
52289- atomic_inc(&mp->stats.no_free_exch);
52290+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52291 goto out;
52292 }
52293 memset(ep, 0, sizeof(*ep));
52294@@ -874,7 +874,7 @@ out:
52295 return ep;
52296 err:
52297 spin_unlock_bh(&pool->lock);
52298- atomic_inc(&mp->stats.no_free_exch_xid);
52299+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52300 mempool_free(ep, mp->ep_pool);
52301 return NULL;
52302 }
52303@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52304 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52305 ep = fc_exch_find(mp, xid);
52306 if (!ep) {
52307- atomic_inc(&mp->stats.xid_not_found);
52308+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52309 reject = FC_RJT_OX_ID;
52310 goto out;
52311 }
52312@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52313 ep = fc_exch_find(mp, xid);
52314 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52315 if (ep) {
52316- atomic_inc(&mp->stats.xid_busy);
52317+ atomic_inc_unchecked(&mp->stats.xid_busy);
52318 reject = FC_RJT_RX_ID;
52319 goto rel;
52320 }
52321@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52322 }
52323 xid = ep->xid; /* get our XID */
52324 } else if (!ep) {
52325- atomic_inc(&mp->stats.xid_not_found);
52326+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52327 reject = FC_RJT_RX_ID; /* XID not found */
52328 goto out;
52329 }
52330@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52331 } else {
52332 sp = &ep->seq;
52333 if (sp->id != fh->fh_seq_id) {
52334- atomic_inc(&mp->stats.seq_not_found);
52335+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52336 if (f_ctl & FC_FC_END_SEQ) {
52337 /*
52338 * Update sequence_id based on incoming last
52339@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52340
52341 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52342 if (!ep) {
52343- atomic_inc(&mp->stats.xid_not_found);
52344+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52345 goto out;
52346 }
52347 if (ep->esb_stat & ESB_ST_COMPLETE) {
52348- atomic_inc(&mp->stats.xid_not_found);
52349+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52350 goto rel;
52351 }
52352 if (ep->rxid == FC_XID_UNKNOWN)
52353 ep->rxid = ntohs(fh->fh_rx_id);
52354 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52355- atomic_inc(&mp->stats.xid_not_found);
52356+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52357 goto rel;
52358 }
52359 if (ep->did != ntoh24(fh->fh_s_id) &&
52360 ep->did != FC_FID_FLOGI) {
52361- atomic_inc(&mp->stats.xid_not_found);
52362+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52363 goto rel;
52364 }
52365 sof = fr_sof(fp);
52366@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52367 sp->ssb_stat |= SSB_ST_RESP;
52368 sp->id = fh->fh_seq_id;
52369 } else if (sp->id != fh->fh_seq_id) {
52370- atomic_inc(&mp->stats.seq_not_found);
52371+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52372 goto rel;
52373 }
52374
52375@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52376 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52377
52378 if (!sp)
52379- atomic_inc(&mp->stats.xid_not_found);
52380+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52381 else
52382- atomic_inc(&mp->stats.non_bls_resp);
52383+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52384
52385 fc_frame_free(fp);
52386 }
52387@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52388
52389 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52390 mp = ema->mp;
52391- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52392+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52393 st->fc_no_free_exch_xid +=
52394- atomic_read(&mp->stats.no_free_exch_xid);
52395- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52396- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52397- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52398- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52399+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52400+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52401+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52402+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52403+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52404 }
52405 }
52406 EXPORT_SYMBOL(fc_exch_update_stats);
52407diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52408index 766098a..1c6c971 100644
52409--- a/drivers/scsi/libsas/sas_ata.c
52410+++ b/drivers/scsi/libsas/sas_ata.c
52411@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
52412 .postreset = ata_std_postreset,
52413 .error_handler = ata_std_error_handler,
52414 .post_internal_cmd = sas_ata_post_internal,
52415- .qc_defer = ata_std_qc_defer,
52416+ .qc_defer = ata_std_qc_defer,
52417 .qc_prep = ata_noop_qc_prep,
52418 .qc_issue = sas_ata_qc_issue,
52419 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52420diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52421index 434e903..5a4a79b 100644
52422--- a/drivers/scsi/lpfc/lpfc.h
52423+++ b/drivers/scsi/lpfc/lpfc.h
52424@@ -430,7 +430,7 @@ struct lpfc_vport {
52425 struct dentry *debug_nodelist;
52426 struct dentry *vport_debugfs_root;
52427 struct lpfc_debugfs_trc *disc_trc;
52428- atomic_t disc_trc_cnt;
52429+ atomic_unchecked_t disc_trc_cnt;
52430 #endif
52431 uint8_t stat_data_enabled;
52432 uint8_t stat_data_blocked;
52433@@ -880,8 +880,8 @@ struct lpfc_hba {
52434 struct timer_list fabric_block_timer;
52435 unsigned long bit_flags;
52436 #define FABRIC_COMANDS_BLOCKED 0
52437- atomic_t num_rsrc_err;
52438- atomic_t num_cmd_success;
52439+ atomic_unchecked_t num_rsrc_err;
52440+ atomic_unchecked_t num_cmd_success;
52441 unsigned long last_rsrc_error_time;
52442 unsigned long last_ramp_down_time;
52443 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52444@@ -916,7 +916,7 @@ struct lpfc_hba {
52445
52446 struct dentry *debug_slow_ring_trc;
52447 struct lpfc_debugfs_trc *slow_ring_trc;
52448- atomic_t slow_ring_trc_cnt;
52449+ atomic_unchecked_t slow_ring_trc_cnt;
52450 /* iDiag debugfs sub-directory */
52451 struct dentry *idiag_root;
52452 struct dentry *idiag_pci_cfg;
52453diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52454index b0aedce..89c6ca6 100644
52455--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52456+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52457@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52458
52459 #include <linux/debugfs.h>
52460
52461-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52462+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52463 static unsigned long lpfc_debugfs_start_time = 0L;
52464
52465 /* iDiag */
52466@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52467 lpfc_debugfs_enable = 0;
52468
52469 len = 0;
52470- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52471+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52472 (lpfc_debugfs_max_disc_trc - 1);
52473 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52474 dtp = vport->disc_trc + i;
52475@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52476 lpfc_debugfs_enable = 0;
52477
52478 len = 0;
52479- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52480+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52481 (lpfc_debugfs_max_slow_ring_trc - 1);
52482 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52483 dtp = phba->slow_ring_trc + i;
52484@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52485 !vport || !vport->disc_trc)
52486 return;
52487
52488- index = atomic_inc_return(&vport->disc_trc_cnt) &
52489+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52490 (lpfc_debugfs_max_disc_trc - 1);
52491 dtp = vport->disc_trc + index;
52492 dtp->fmt = fmt;
52493 dtp->data1 = data1;
52494 dtp->data2 = data2;
52495 dtp->data3 = data3;
52496- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52497+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52498 dtp->jif = jiffies;
52499 #endif
52500 return;
52501@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52502 !phba || !phba->slow_ring_trc)
52503 return;
52504
52505- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52506+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52507 (lpfc_debugfs_max_slow_ring_trc - 1);
52508 dtp = phba->slow_ring_trc + index;
52509 dtp->fmt = fmt;
52510 dtp->data1 = data1;
52511 dtp->data2 = data2;
52512 dtp->data3 = data3;
52513- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52514+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52515 dtp->jif = jiffies;
52516 #endif
52517 return;
52518@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52519 "slow_ring buffer\n");
52520 goto debug_failed;
52521 }
52522- atomic_set(&phba->slow_ring_trc_cnt, 0);
52523+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52524 memset(phba->slow_ring_trc, 0,
52525 (sizeof(struct lpfc_debugfs_trc) *
52526 lpfc_debugfs_max_slow_ring_trc));
52527@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52528 "buffer\n");
52529 goto debug_failed;
52530 }
52531- atomic_set(&vport->disc_trc_cnt, 0);
52532+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52533
52534 snprintf(name, sizeof(name), "discovery_trace");
52535 vport->debug_disc_trc =
52536diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52537index a5769a9..718ecc7 100644
52538--- a/drivers/scsi/lpfc/lpfc_init.c
52539+++ b/drivers/scsi/lpfc/lpfc_init.c
52540@@ -11299,8 +11299,10 @@ lpfc_init(void)
52541 "misc_register returned with status %d", error);
52542
52543 if (lpfc_enable_npiv) {
52544- lpfc_transport_functions.vport_create = lpfc_vport_create;
52545- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52546+ pax_open_kernel();
52547+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52548+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52549+ pax_close_kernel();
52550 }
52551 lpfc_transport_template =
52552 fc_attach_transport(&lpfc_transport_functions);
52553diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52554index 7862c55..5aa65df 100644
52555--- a/drivers/scsi/lpfc/lpfc_scsi.c
52556+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52557@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52558 uint32_t evt_posted;
52559
52560 spin_lock_irqsave(&phba->hbalock, flags);
52561- atomic_inc(&phba->num_rsrc_err);
52562+ atomic_inc_unchecked(&phba->num_rsrc_err);
52563 phba->last_rsrc_error_time = jiffies;
52564
52565 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
52566@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52567 unsigned long num_rsrc_err, num_cmd_success;
52568 int i;
52569
52570- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52571- num_cmd_success = atomic_read(&phba->num_cmd_success);
52572+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52573+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52574
52575 /*
52576 * The error and success command counters are global per
52577@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52578 }
52579 }
52580 lpfc_destroy_vport_work_array(phba, vports);
52581- atomic_set(&phba->num_rsrc_err, 0);
52582- atomic_set(&phba->num_cmd_success, 0);
52583+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52584+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52585 }
52586
52587 /**
52588diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52589index dd46101..ca80eb9 100644
52590--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52591+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52592@@ -1559,7 +1559,7 @@ _scsih_get_resync(struct device *dev)
52593 {
52594 struct scsi_device *sdev = to_scsi_device(dev);
52595 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52596- static struct _raid_device *raid_device;
52597+ struct _raid_device *raid_device;
52598 unsigned long flags;
52599 Mpi2RaidVolPage0_t vol_pg0;
52600 Mpi2ConfigReply_t mpi_reply;
52601@@ -1611,7 +1611,7 @@ _scsih_get_state(struct device *dev)
52602 {
52603 struct scsi_device *sdev = to_scsi_device(dev);
52604 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52605- static struct _raid_device *raid_device;
52606+ struct _raid_device *raid_device;
52607 unsigned long flags;
52608 Mpi2RaidVolPage0_t vol_pg0;
52609 Mpi2ConfigReply_t mpi_reply;
52610@@ -6648,7 +6648,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52611 Mpi2EventDataIrOperationStatus_t *event_data =
52612 (Mpi2EventDataIrOperationStatus_t *)
52613 fw_event->event_data;
52614- static struct _raid_device *raid_device;
52615+ struct _raid_device *raid_device;
52616 unsigned long flags;
52617 u16 handle;
52618
52619@@ -7119,7 +7119,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52620 u64 sas_address;
52621 struct _sas_device *sas_device;
52622 struct _sas_node *expander_device;
52623- static struct _raid_device *raid_device;
52624+ struct _raid_device *raid_device;
52625 u8 retry_count;
52626 unsigned long flags;
52627
52628diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52629index 6f3275d..fa5e6b6 100644
52630--- a/drivers/scsi/pmcraid.c
52631+++ b/drivers/scsi/pmcraid.c
52632@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52633 res->scsi_dev = scsi_dev;
52634 scsi_dev->hostdata = res;
52635 res->change_detected = 0;
52636- atomic_set(&res->read_failures, 0);
52637- atomic_set(&res->write_failures, 0);
52638+ atomic_set_unchecked(&res->read_failures, 0);
52639+ atomic_set_unchecked(&res->write_failures, 0);
52640 rc = 0;
52641 }
52642 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52643@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52644
52645 /* If this was a SCSI read/write command keep count of errors */
52646 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52647- atomic_inc(&res->read_failures);
52648+ atomic_inc_unchecked(&res->read_failures);
52649 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52650- atomic_inc(&res->write_failures);
52651+ atomic_inc_unchecked(&res->write_failures);
52652
52653 if (!RES_IS_GSCSI(res->cfg_entry) &&
52654 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52655@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
52656 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52657 * hrrq_id assigned here in queuecommand
52658 */
52659- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52660+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52661 pinstance->num_hrrq;
52662 cmd->cmd_done = pmcraid_io_done;
52663
52664@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
52665 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52666 * hrrq_id assigned here in queuecommand
52667 */
52668- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52669+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52670 pinstance->num_hrrq;
52671
52672 if (request_size) {
52673@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52674
52675 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52676 /* add resources only after host is added into system */
52677- if (!atomic_read(&pinstance->expose_resources))
52678+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52679 return;
52680
52681 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52682@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52683 init_waitqueue_head(&pinstance->reset_wait_q);
52684
52685 atomic_set(&pinstance->outstanding_cmds, 0);
52686- atomic_set(&pinstance->last_message_id, 0);
52687- atomic_set(&pinstance->expose_resources, 0);
52688+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52689+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52690
52691 INIT_LIST_HEAD(&pinstance->free_res_q);
52692 INIT_LIST_HEAD(&pinstance->used_res_q);
52693@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52694 /* Schedule worker thread to handle CCN and take care of adding and
52695 * removing devices to OS
52696 */
52697- atomic_set(&pinstance->expose_resources, 1);
52698+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52699 schedule_work(&pinstance->worker_q);
52700 return rc;
52701
52702diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52703index e1d150f..6c6df44 100644
52704--- a/drivers/scsi/pmcraid.h
52705+++ b/drivers/scsi/pmcraid.h
52706@@ -748,7 +748,7 @@ struct pmcraid_instance {
52707 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52708
52709 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52710- atomic_t last_message_id;
52711+ atomic_unchecked_t last_message_id;
52712
52713 /* configuration table */
52714 struct pmcraid_config_table *cfg_table;
52715@@ -777,7 +777,7 @@ struct pmcraid_instance {
52716 atomic_t outstanding_cmds;
52717
52718 /* should add/delete resources to mid-layer now ?*/
52719- atomic_t expose_resources;
52720+ atomic_unchecked_t expose_resources;
52721
52722
52723
52724@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52725 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52726 };
52727 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52728- atomic_t read_failures; /* count of failed READ commands */
52729- atomic_t write_failures; /* count of failed WRITE commands */
52730+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52731+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52732
52733 /* To indicate add/delete/modify during CCN */
52734 u8 change_detected;
52735diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52736index 16fe519..3b1ec82 100644
52737--- a/drivers/scsi/qla2xxx/qla_attr.c
52738+++ b/drivers/scsi/qla2xxx/qla_attr.c
52739@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52740 return 0;
52741 }
52742
52743-struct fc_function_template qla2xxx_transport_functions = {
52744+fc_function_template_no_const qla2xxx_transport_functions = {
52745
52746 .show_host_node_name = 1,
52747 .show_host_port_name = 1,
52748@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52749 .bsg_timeout = qla24xx_bsg_timeout,
52750 };
52751
52752-struct fc_function_template qla2xxx_transport_vport_functions = {
52753+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52754
52755 .show_host_node_name = 1,
52756 .show_host_port_name = 1,
52757diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52758index d646540..5b13554 100644
52759--- a/drivers/scsi/qla2xxx/qla_gbl.h
52760+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52761@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
52762 struct device_attribute;
52763 extern struct device_attribute *qla2x00_host_attrs[];
52764 struct fc_function_template;
52765-extern struct fc_function_template qla2xxx_transport_functions;
52766-extern struct fc_function_template qla2xxx_transport_vport_functions;
52767+extern fc_function_template_no_const qla2xxx_transport_functions;
52768+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52769 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52770 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52771 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52772diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52773index 8252c0e..613adad 100644
52774--- a/drivers/scsi/qla2xxx/qla_os.c
52775+++ b/drivers/scsi/qla2xxx/qla_os.c
52776@@ -1493,8 +1493,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52777 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52778 /* Ok, a 64bit DMA mask is applicable. */
52779 ha->flags.enable_64bit_addressing = 1;
52780- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52781- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52782+ pax_open_kernel();
52783+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52784+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52785+ pax_close_kernel();
52786 return;
52787 }
52788 }
52789diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52790index 8f6d0fb..1b21097 100644
52791--- a/drivers/scsi/qla4xxx/ql4_def.h
52792+++ b/drivers/scsi/qla4xxx/ql4_def.h
52793@@ -305,7 +305,7 @@ struct ddb_entry {
52794 * (4000 only) */
52795 atomic_t relogin_timer; /* Max Time to wait for
52796 * relogin to complete */
52797- atomic_t relogin_retry_count; /* Num of times relogin has been
52798+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52799 * retried */
52800 uint32_t default_time2wait; /* Default Min time between
52801 * relogins (+aens) */
52802diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52803index 199fcf7..3c3a918 100644
52804--- a/drivers/scsi/qla4xxx/ql4_os.c
52805+++ b/drivers/scsi/qla4xxx/ql4_os.c
52806@@ -4496,12 +4496,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52807 */
52808 if (!iscsi_is_session_online(cls_sess)) {
52809 /* Reset retry relogin timer */
52810- atomic_inc(&ddb_entry->relogin_retry_count);
52811+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52812 DEBUG2(ql4_printk(KERN_INFO, ha,
52813 "%s: index[%d] relogin timed out-retrying"
52814 " relogin (%d), retry (%d)\n", __func__,
52815 ddb_entry->fw_ddb_index,
52816- atomic_read(&ddb_entry->relogin_retry_count),
52817+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52818 ddb_entry->default_time2wait + 4));
52819 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52820 atomic_set(&ddb_entry->retry_relogin_timer,
52821@@ -6609,7 +6609,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52822
52823 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52824 atomic_set(&ddb_entry->relogin_timer, 0);
52825- atomic_set(&ddb_entry->relogin_retry_count, 0);
52826+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52827 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52828 ddb_entry->default_relogin_timeout =
52829 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52830diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
52831index d81f3cc..0093e5b 100644
52832--- a/drivers/scsi/scsi.c
52833+++ b/drivers/scsi/scsi.c
52834@@ -645,7 +645,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52835 struct Scsi_Host *host = cmd->device->host;
52836 int rtn = 0;
52837
52838- atomic_inc(&cmd->device->iorequest_cnt);
52839+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52840
52841 /* check if the device is still usable */
52842 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52843diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52844index aaea4b9..c64408d 100644
52845--- a/drivers/scsi/scsi_lib.c
52846+++ b/drivers/scsi/scsi_lib.c
52847@@ -1581,7 +1581,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52848 shost = sdev->host;
52849 scsi_init_cmd_errh(cmd);
52850 cmd->result = DID_NO_CONNECT << 16;
52851- atomic_inc(&cmd->device->iorequest_cnt);
52852+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52853
52854 /*
52855 * SCSI request completion path will do scsi_device_unbusy(),
52856@@ -1604,9 +1604,9 @@ static void scsi_softirq_done(struct request *rq)
52857
52858 INIT_LIST_HEAD(&cmd->eh_entry);
52859
52860- atomic_inc(&cmd->device->iodone_cnt);
52861+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52862 if (cmd->result)
52863- atomic_inc(&cmd->device->ioerr_cnt);
52864+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52865
52866 disposition = scsi_decide_disposition(cmd);
52867 if (disposition != SUCCESS &&
52868diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52869index 8b4105a..1f58363 100644
52870--- a/drivers/scsi/scsi_sysfs.c
52871+++ b/drivers/scsi/scsi_sysfs.c
52872@@ -805,7 +805,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52873 char *buf) \
52874 { \
52875 struct scsi_device *sdev = to_scsi_device(dev); \
52876- unsigned long long count = atomic_read(&sdev->field); \
52877+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52878 return snprintf(buf, 20, "0x%llx\n", count); \
52879 } \
52880 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52881diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52882index 5d6f348..18778a6b 100644
52883--- a/drivers/scsi/scsi_transport_fc.c
52884+++ b/drivers/scsi/scsi_transport_fc.c
52885@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52886 * Netlink Infrastructure
52887 */
52888
52889-static atomic_t fc_event_seq;
52890+static atomic_unchecked_t fc_event_seq;
52891
52892 /**
52893 * fc_get_event_number - Obtain the next sequential FC event number
52894@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52895 u32
52896 fc_get_event_number(void)
52897 {
52898- return atomic_add_return(1, &fc_event_seq);
52899+ return atomic_add_return_unchecked(1, &fc_event_seq);
52900 }
52901 EXPORT_SYMBOL(fc_get_event_number);
52902
52903@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52904 {
52905 int error;
52906
52907- atomic_set(&fc_event_seq, 0);
52908+ atomic_set_unchecked(&fc_event_seq, 0);
52909
52910 error = transport_class_register(&fc_host_class);
52911 if (error)
52912@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52913 char *cp;
52914
52915 *val = simple_strtoul(buf, &cp, 0);
52916- if ((*cp && (*cp != '\n')) || (*val < 0))
52917+ if (*cp && (*cp != '\n'))
52918 return -EINVAL;
52919 /*
52920 * Check for overflow; dev_loss_tmo is u32
52921diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52922index 67d43e3..8cee73c 100644
52923--- a/drivers/scsi/scsi_transport_iscsi.c
52924+++ b/drivers/scsi/scsi_transport_iscsi.c
52925@@ -79,7 +79,7 @@ struct iscsi_internal {
52926 struct transport_container session_cont;
52927 };
52928
52929-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52930+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52931 static struct workqueue_struct *iscsi_eh_timer_workq;
52932
52933 static DEFINE_IDA(iscsi_sess_ida);
52934@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52935 int err;
52936
52937 ihost = shost->shost_data;
52938- session->sid = atomic_add_return(1, &iscsi_session_nr);
52939+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52940
52941 if (target_id == ISCSI_MAX_TARGET) {
52942 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52943@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52944 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52945 ISCSI_TRANSPORT_VERSION);
52946
52947- atomic_set(&iscsi_session_nr, 0);
52948+ atomic_set_unchecked(&iscsi_session_nr, 0);
52949
52950 err = class_register(&iscsi_transport_class);
52951 if (err)
52952diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52953index ae45bd9..c32a586 100644
52954--- a/drivers/scsi/scsi_transport_srp.c
52955+++ b/drivers/scsi/scsi_transport_srp.c
52956@@ -35,7 +35,7 @@
52957 #include "scsi_priv.h"
52958
52959 struct srp_host_attrs {
52960- atomic_t next_port_id;
52961+ atomic_unchecked_t next_port_id;
52962 };
52963 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52964
52965@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52966 struct Scsi_Host *shost = dev_to_shost(dev);
52967 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52968
52969- atomic_set(&srp_host->next_port_id, 0);
52970+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52971 return 0;
52972 }
52973
52974@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52975 rport_fast_io_fail_timedout);
52976 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52977
52978- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52979+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52980 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52981
52982 transport_setup_device(&rport->dev);
52983diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52984index 2c2041c..9d94085 100644
52985--- a/drivers/scsi/sd.c
52986+++ b/drivers/scsi/sd.c
52987@@ -3002,7 +3002,7 @@ static int sd_probe(struct device *dev)
52988 sdkp->disk = gd;
52989 sdkp->index = index;
52990 atomic_set(&sdkp->openers, 0);
52991- atomic_set(&sdkp->device->ioerr_cnt, 0);
52992+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52993
52994 if (!sdp->request_queue->rq_timeout) {
52995 if (sdp->type != TYPE_MOD)
52996diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52997index 01cf888..59e0475 100644
52998--- a/drivers/scsi/sg.c
52999+++ b/drivers/scsi/sg.c
53000@@ -1138,7 +1138,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53001 sdp->disk->disk_name,
53002 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53003 NULL,
53004- (char *)arg);
53005+ (char __user *)arg);
53006 case BLKTRACESTART:
53007 return blk_trace_startstop(sdp->device->request_queue, 1);
53008 case BLKTRACESTOP:
53009diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
53010index 11a5043..e36f04c 100644
53011--- a/drivers/soc/tegra/fuse/fuse-tegra.c
53012+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
53013@@ -70,7 +70,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
53014 return i;
53015 }
53016
53017-static struct bin_attribute fuse_bin_attr = {
53018+static bin_attribute_no_const fuse_bin_attr = {
53019 .attr = { .name = "fuse", .mode = S_IRUGO, },
53020 .read = fuse_read,
53021 };
53022diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53023index ca935df..ae8a3dc 100644
53024--- a/drivers/spi/spi.c
53025+++ b/drivers/spi/spi.c
53026@@ -2210,7 +2210,7 @@ int spi_bus_unlock(struct spi_master *master)
53027 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53028
53029 /* portable code must never pass more than 32 bytes */
53030-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53031+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53032
53033 static u8 *buf;
53034
53035diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53036index b41429f..2de5373 100644
53037--- a/drivers/staging/android/timed_output.c
53038+++ b/drivers/staging/android/timed_output.c
53039@@ -25,7 +25,7 @@
53040 #include "timed_output.h"
53041
53042 static struct class *timed_output_class;
53043-static atomic_t device_count;
53044+static atomic_unchecked_t device_count;
53045
53046 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53047 char *buf)
53048@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
53049 timed_output_class = class_create(THIS_MODULE, "timed_output");
53050 if (IS_ERR(timed_output_class))
53051 return PTR_ERR(timed_output_class);
53052- atomic_set(&device_count, 0);
53053+ atomic_set_unchecked(&device_count, 0);
53054 timed_output_class->dev_groups = timed_output_groups;
53055 }
53056
53057@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53058 if (ret < 0)
53059 return ret;
53060
53061- tdev->index = atomic_inc_return(&device_count);
53062+ tdev->index = atomic_inc_return_unchecked(&device_count);
53063 tdev->dev = device_create(timed_output_class, NULL,
53064 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53065 if (IS_ERR(tdev->dev))
53066diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53067index 001348c..cfaac8a 100644
53068--- a/drivers/staging/gdm724x/gdm_tty.c
53069+++ b/drivers/staging/gdm724x/gdm_tty.c
53070@@ -44,7 +44,7 @@
53071 #define gdm_tty_send_control(n, r, v, d, l) (\
53072 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53073
53074-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53075+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53076
53077 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53078 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53079diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
53080index 6b22106..6c6e641 100644
53081--- a/drivers/staging/imx-drm/imx-drm-core.c
53082+++ b/drivers/staging/imx-drm/imx-drm-core.c
53083@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
53084 if (imxdrm->pipes >= MAX_CRTC)
53085 return -EINVAL;
53086
53087- if (imxdrm->drm->open_count)
53088+ if (local_read(&imxdrm->drm->open_count))
53089 return -EBUSY;
53090
53091 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
53092diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53093index bcce919..f30fcf9 100644
53094--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53095+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53096@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53097 return 0;
53098 }
53099
53100-sfw_test_client_ops_t brw_test_client;
53101-void brw_init_test_client(void)
53102-{
53103- brw_test_client.tso_init = brw_client_init;
53104- brw_test_client.tso_fini = brw_client_fini;
53105- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53106- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53107+sfw_test_client_ops_t brw_test_client = {
53108+ .tso_init = brw_client_init,
53109+ .tso_fini = brw_client_fini,
53110+ .tso_prep_rpc = brw_client_prep_rpc,
53111+ .tso_done_rpc = brw_client_done_rpc,
53112 };
53113
53114 srpc_service_t brw_test_service;
53115diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53116index 7e83dff..1f9a545 100644
53117--- a/drivers/staging/lustre/lnet/selftest/framework.c
53118+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53119@@ -1633,12 +1633,10 @@ static srpc_service_t sfw_services[] =
53120
53121 extern sfw_test_client_ops_t ping_test_client;
53122 extern srpc_service_t ping_test_service;
53123-extern void ping_init_test_client(void);
53124 extern void ping_init_test_service(void);
53125
53126 extern sfw_test_client_ops_t brw_test_client;
53127 extern srpc_service_t brw_test_service;
53128-extern void brw_init_test_client(void);
53129 extern void brw_init_test_service(void);
53130
53131
53132@@ -1682,12 +1680,10 @@ sfw_startup (void)
53133 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53134 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53135
53136- brw_init_test_client();
53137 brw_init_test_service();
53138 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53139 LASSERT (rc == 0);
53140
53141- ping_init_test_client();
53142 ping_init_test_service();
53143 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53144 LASSERT (rc == 0);
53145diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53146index 750cac4..e4d751f 100644
53147--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53148+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53149@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53150 return 0;
53151 }
53152
53153-sfw_test_client_ops_t ping_test_client;
53154-void ping_init_test_client(void)
53155-{
53156- ping_test_client.tso_init = ping_client_init;
53157- ping_test_client.tso_fini = ping_client_fini;
53158- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53159- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53160-}
53161+sfw_test_client_ops_t ping_test_client = {
53162+ .tso_init = ping_client_init,
53163+ .tso_fini = ping_client_fini,
53164+ .tso_prep_rpc = ping_client_prep_rpc,
53165+ .tso_done_rpc = ping_client_done_rpc,
53166+};
53167
53168 srpc_service_t ping_test_service;
53169 void ping_init_test_service(void)
53170diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53171index 30b1812f..9e5bd0b 100644
53172--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53173+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53174@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
53175 ldlm_completion_callback lcs_completion;
53176 ldlm_blocking_callback lcs_blocking;
53177 ldlm_glimpse_callback lcs_glimpse;
53178-};
53179+} __no_const;
53180
53181 /* ldlm_lockd.c */
53182 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53183diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53184index 489bdd3..65058081 100644
53185--- a/drivers/staging/lustre/lustre/include/obd.h
53186+++ b/drivers/staging/lustre/lustre/include/obd.h
53187@@ -1438,7 +1438,7 @@ struct md_ops {
53188 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53189 * wrapper function in include/linux/obd_class.h.
53190 */
53191-};
53192+} __no_const;
53193
53194 struct lsm_operations {
53195 void (*lsm_free)(struct lov_stripe_md *);
53196diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53197index b798daa..b28ca8f 100644
53198--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53199+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53200@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53201 int added = (mode == LCK_NL);
53202 int overlaps = 0;
53203 int splitted = 0;
53204- const struct ldlm_callback_suite null_cbs = { NULL };
53205+ const struct ldlm_callback_suite null_cbs = { };
53206
53207 CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53208 *flags, new->l_policy_data.l_flock.owner,
53209diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53210index 13a9266..3439390 100644
53211--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53212+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53213@@ -235,7 +235,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
53214 void __user *buffer, size_t *lenp, loff_t *ppos)
53215 {
53216 int rc, max_delay_cs;
53217- struct ctl_table dummy = *table;
53218+ ctl_table_no_const dummy = *table;
53219 long d;
53220
53221 dummy.data = &max_delay_cs;
53222@@ -267,7 +267,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
53223 void __user *buffer, size_t *lenp, loff_t *ppos)
53224 {
53225 int rc, min_delay_cs;
53226- struct ctl_table dummy = *table;
53227+ ctl_table_no_const dummy = *table;
53228 long d;
53229
53230 dummy.data = &min_delay_cs;
53231@@ -299,7 +299,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
53232 void __user *buffer, size_t *lenp, loff_t *ppos)
53233 {
53234 int rc, backoff;
53235- struct ctl_table dummy = *table;
53236+ ctl_table_no_const dummy = *table;
53237
53238 dummy.data = &backoff;
53239 dummy.proc_handler = &proc_dointvec;
53240diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53241index 3396858..c0bd996 100644
53242--- a/drivers/staging/lustre/lustre/libcfs/module.c
53243+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53244@@ -314,11 +314,11 @@ out:
53245
53246
53247 struct cfs_psdev_ops libcfs_psdev_ops = {
53248- libcfs_psdev_open,
53249- libcfs_psdev_release,
53250- NULL,
53251- NULL,
53252- libcfs_ioctl
53253+ .p_open = libcfs_psdev_open,
53254+ .p_close = libcfs_psdev_release,
53255+ .p_read = NULL,
53256+ .p_write = NULL,
53257+ .p_ioctl = libcfs_ioctl
53258 };
53259
53260 extern int insert_proc(void);
53261diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
53262index efa2faf..03a9836 100644
53263--- a/drivers/staging/lustre/lustre/llite/dir.c
53264+++ b/drivers/staging/lustre/lustre/llite/dir.c
53265@@ -659,7 +659,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
53266 int mode;
53267 int err;
53268
53269- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
53270+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
53271 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
53272 strlen(filename), mode, LUSTRE_OPC_MKDIR,
53273 lump);
53274diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53275index a0f4868..139f1fb 100644
53276--- a/drivers/staging/octeon/ethernet-rx.c
53277+++ b/drivers/staging/octeon/ethernet-rx.c
53278@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53279 /* Increment RX stats for virtual ports */
53280 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53281 #ifdef CONFIG_64BIT
53282- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
53283- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
53284+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
53285+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
53286 #else
53287- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
53288- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
53289+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
53290+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
53291 #endif
53292 }
53293 netif_receive_skb(skb);
53294@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53295 dev->name);
53296 */
53297 #ifdef CONFIG_64BIT
53298- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
53299+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53300 #else
53301- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
53302+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
53303 #endif
53304 dev_kfree_skb_irq(skb);
53305 }
53306diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53307index 2aa7235..ba3c205 100644
53308--- a/drivers/staging/octeon/ethernet.c
53309+++ b/drivers/staging/octeon/ethernet.c
53310@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53311 * since the RX tasklet also increments it.
53312 */
53313 #ifdef CONFIG_64BIT
53314- atomic64_add(rx_status.dropped_packets,
53315- (atomic64_t *)&priv->stats.rx_dropped);
53316+ atomic64_add_unchecked(rx_status.dropped_packets,
53317+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53318 #else
53319- atomic_add(rx_status.dropped_packets,
53320- (atomic_t *)&priv->stats.rx_dropped);
53321+ atomic_add_unchecked(rx_status.dropped_packets,
53322+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53323 #endif
53324 }
53325
53326diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53327index 56d5c50..a14f4db 100644
53328--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53329+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53330@@ -234,7 +234,7 @@ struct hal_ops {
53331
53332 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53333 void (*hal_reset_security_engine)(struct adapter *adapter);
53334-};
53335+} __no_const;
53336
53337 enum rt_eeprom_type {
53338 EEPROM_93C46,
53339diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53340index dc23395..cf7e9b1 100644
53341--- a/drivers/staging/rtl8712/rtl871x_io.h
53342+++ b/drivers/staging/rtl8712/rtl871x_io.h
53343@@ -108,7 +108,7 @@ struct _io_ops {
53344 u8 *pmem);
53345 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53346 u8 *pmem);
53347-};
53348+} __no_const;
53349
53350 struct io_req {
53351 struct list_head list;
53352diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53353index 2bf2e2f..84421c9 100644
53354--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53355+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53356@@ -228,7 +228,7 @@ typedef struct {
53357 void (*device_resume)(ulong busNo, ulong devNo);
53358 int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
53359 ulong *maxSize);
53360-} VISORCHIPSET_BUSDEV_NOTIFIERS;
53361+} __no_const VISORCHIPSET_BUSDEV_NOTIFIERS;
53362
53363 /* These functions live inside visorchipset, and will be called to indicate
53364 * responses to specific events (by code outside of visorchipset).
53365@@ -243,7 +243,7 @@ typedef struct {
53366 void (*device_destroy)(ulong busNo, ulong devNo, int response);
53367 void (*device_pause)(ulong busNo, ulong devNo, int response);
53368 void (*device_resume)(ulong busNo, ulong devNo, int response);
53369-} VISORCHIPSET_BUSDEV_RESPONDERS;
53370+} __no_const VISORCHIPSET_BUSDEV_RESPONDERS;
53371
53372 /** Register functions (in the bus driver) to get called by visorchipset
53373 * whenever a bus or device appears for which this service partition is
53374diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
53375index 164136b..7244df5 100644
53376--- a/drivers/staging/vt6655/hostap.c
53377+++ b/drivers/staging/vt6655/hostap.c
53378@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
53379 *
53380 */
53381
53382+static net_device_ops_no_const apdev_netdev_ops;
53383+
53384 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53385 {
53386 PSDevice apdev_priv;
53387 struct net_device *dev = pDevice->dev;
53388 int ret;
53389- const struct net_device_ops apdev_netdev_ops = {
53390- .ndo_start_xmit = pDevice->tx_80211,
53391- };
53392
53393 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
53394
53395@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
53396 *apdev_priv = *pDevice;
53397 eth_hw_addr_inherit(pDevice->apdev, dev);
53398
53399+ /* only half broken now */
53400+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
53401 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
53402
53403 pDevice->apdev->type = ARPHRD_IEEE80211;
53404diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53405index e7e9372..161f530 100644
53406--- a/drivers/target/sbp/sbp_target.c
53407+++ b/drivers/target/sbp/sbp_target.c
53408@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53409
53410 #define SESSION_MAINTENANCE_INTERVAL HZ
53411
53412-static atomic_t login_id = ATOMIC_INIT(0);
53413+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53414
53415 static void session_maintenance_work(struct work_struct *);
53416 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53417@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53418 login->lun = se_lun;
53419 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53420 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53421- login->login_id = atomic_inc_return(&login_id);
53422+ login->login_id = atomic_inc_return_unchecked(&login_id);
53423
53424 login->tgt_agt = sbp_target_agent_register(login);
53425 if (IS_ERR(login->tgt_agt)) {
53426diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53427index 98da901..bb443e8 100644
53428--- a/drivers/target/target_core_device.c
53429+++ b/drivers/target/target_core_device.c
53430@@ -1525,7 +1525,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53431 spin_lock_init(&dev->se_tmr_lock);
53432 spin_lock_init(&dev->qf_cmd_lock);
53433 sema_init(&dev->caw_sem, 1);
53434- atomic_set(&dev->dev_ordered_id, 0);
53435+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53436 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53437 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53438 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53439diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53440index 7fa62fc..abdd041 100644
53441--- a/drivers/target/target_core_transport.c
53442+++ b/drivers/target/target_core_transport.c
53443@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53444 * Used to determine when ORDERED commands should go from
53445 * Dormant to Active status.
53446 */
53447- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53448+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53449 smp_mb__after_atomic();
53450 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53451 cmd->se_ordered_id, cmd->sam_task_attr,
53452diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53453index 4b2b999..cad9fa5 100644
53454--- a/drivers/thermal/of-thermal.c
53455+++ b/drivers/thermal/of-thermal.c
53456@@ -30,6 +30,7 @@
53457 #include <linux/err.h>
53458 #include <linux/export.h>
53459 #include <linux/string.h>
53460+#include <linux/mm.h>
53461
53462 #include "thermal_core.h"
53463
53464@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53465 tz->get_trend = get_trend;
53466 tz->sensor_data = data;
53467
53468- tzd->ops->get_temp = of_thermal_get_temp;
53469- tzd->ops->get_trend = of_thermal_get_trend;
53470+ pax_open_kernel();
53471+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53472+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53473+ pax_close_kernel();
53474 mutex_unlock(&tzd->lock);
53475
53476 return tzd;
53477@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53478 return;
53479
53480 mutex_lock(&tzd->lock);
53481- tzd->ops->get_temp = NULL;
53482- tzd->ops->get_trend = NULL;
53483+ pax_open_kernel();
53484+ *(void **)&tzd->ops->get_temp = NULL;
53485+ *(void **)&tzd->ops->get_trend = NULL;
53486+ pax_close_kernel();
53487
53488 tz->get_temp = NULL;
53489 tz->get_trend = NULL;
53490diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53491index fd66f57..48e6376 100644
53492--- a/drivers/tty/cyclades.c
53493+++ b/drivers/tty/cyclades.c
53494@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53495 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53496 info->port.count);
53497 #endif
53498- info->port.count++;
53499+ atomic_inc(&info->port.count);
53500 #ifdef CY_DEBUG_COUNT
53501 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53502- current->pid, info->port.count);
53503+ current->pid, atomic_read(&info->port.count));
53504 #endif
53505
53506 /*
53507@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53508 for (j = 0; j < cy_card[i].nports; j++) {
53509 info = &cy_card[i].ports[j];
53510
53511- if (info->port.count) {
53512+ if (atomic_read(&info->port.count)) {
53513 /* XXX is the ldisc num worth this? */
53514 struct tty_struct *tty;
53515 struct tty_ldisc *ld;
53516diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53517index 4fcec1d..5a036f7 100644
53518--- a/drivers/tty/hvc/hvc_console.c
53519+++ b/drivers/tty/hvc/hvc_console.c
53520@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53521
53522 spin_lock_irqsave(&hp->port.lock, flags);
53523 /* Check and then increment for fast path open. */
53524- if (hp->port.count++ > 0) {
53525+ if (atomic_inc_return(&hp->port.count) > 1) {
53526 spin_unlock_irqrestore(&hp->port.lock, flags);
53527 hvc_kick();
53528 return 0;
53529@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53530
53531 spin_lock_irqsave(&hp->port.lock, flags);
53532
53533- if (--hp->port.count == 0) {
53534+ if (atomic_dec_return(&hp->port.count) == 0) {
53535 spin_unlock_irqrestore(&hp->port.lock, flags);
53536 /* We are done with the tty pointer now. */
53537 tty_port_tty_set(&hp->port, NULL);
53538@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53539 */
53540 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53541 } else {
53542- if (hp->port.count < 0)
53543+ if (atomic_read(&hp->port.count) < 0)
53544 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53545- hp->vtermno, hp->port.count);
53546+ hp->vtermno, atomic_read(&hp->port.count));
53547 spin_unlock_irqrestore(&hp->port.lock, flags);
53548 }
53549 }
53550@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53551 * open->hangup case this can be called after the final close so prevent
53552 * that from happening for now.
53553 */
53554- if (hp->port.count <= 0) {
53555+ if (atomic_read(&hp->port.count) <= 0) {
53556 spin_unlock_irqrestore(&hp->port.lock, flags);
53557 return;
53558 }
53559
53560- hp->port.count = 0;
53561+ atomic_set(&hp->port.count, 0);
53562 spin_unlock_irqrestore(&hp->port.lock, flags);
53563 tty_port_tty_set(&hp->port, NULL);
53564
53565@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53566 return -EPIPE;
53567
53568 /* FIXME what's this (unprotected) check for? */
53569- if (hp->port.count <= 0)
53570+ if (atomic_read(&hp->port.count) <= 0)
53571 return -EIO;
53572
53573 spin_lock_irqsave(&hp->lock, flags);
53574diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53575index 81e939e..95ead10 100644
53576--- a/drivers/tty/hvc/hvcs.c
53577+++ b/drivers/tty/hvc/hvcs.c
53578@@ -83,6 +83,7 @@
53579 #include <asm/hvcserver.h>
53580 #include <asm/uaccess.h>
53581 #include <asm/vio.h>
53582+#include <asm/local.h>
53583
53584 /*
53585 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53586@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53587
53588 spin_lock_irqsave(&hvcsd->lock, flags);
53589
53590- if (hvcsd->port.count > 0) {
53591+ if (atomic_read(&hvcsd->port.count) > 0) {
53592 spin_unlock_irqrestore(&hvcsd->lock, flags);
53593 printk(KERN_INFO "HVCS: vterm state unchanged. "
53594 "The hvcs device node is still in use.\n");
53595@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53596 }
53597 }
53598
53599- hvcsd->port.count = 0;
53600+ atomic_set(&hvcsd->port.count, 0);
53601 hvcsd->port.tty = tty;
53602 tty->driver_data = hvcsd;
53603
53604@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53605 unsigned long flags;
53606
53607 spin_lock_irqsave(&hvcsd->lock, flags);
53608- hvcsd->port.count++;
53609+ atomic_inc(&hvcsd->port.count);
53610 hvcsd->todo_mask |= HVCS_SCHED_READ;
53611 spin_unlock_irqrestore(&hvcsd->lock, flags);
53612
53613@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53614 hvcsd = tty->driver_data;
53615
53616 spin_lock_irqsave(&hvcsd->lock, flags);
53617- if (--hvcsd->port.count == 0) {
53618+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53619
53620 vio_disable_interrupts(hvcsd->vdev);
53621
53622@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53623
53624 free_irq(irq, hvcsd);
53625 return;
53626- } else if (hvcsd->port.count < 0) {
53627+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53628 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53629 " is missmanaged.\n",
53630- hvcsd->vdev->unit_address, hvcsd->port.count);
53631+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53632 }
53633
53634 spin_unlock_irqrestore(&hvcsd->lock, flags);
53635@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53636
53637 spin_lock_irqsave(&hvcsd->lock, flags);
53638 /* Preserve this so that we know how many kref refs to put */
53639- temp_open_count = hvcsd->port.count;
53640+ temp_open_count = atomic_read(&hvcsd->port.count);
53641
53642 /*
53643 * Don't kref put inside the spinlock because the destruction
53644@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53645 tty->driver_data = NULL;
53646 hvcsd->port.tty = NULL;
53647
53648- hvcsd->port.count = 0;
53649+ atomic_set(&hvcsd->port.count, 0);
53650
53651 /* This will drop any buffered data on the floor which is OK in a hangup
53652 * scenario. */
53653@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53654 * the middle of a write operation? This is a crummy place to do this
53655 * but we want to keep it all in the spinlock.
53656 */
53657- if (hvcsd->port.count <= 0) {
53658+ if (atomic_read(&hvcsd->port.count) <= 0) {
53659 spin_unlock_irqrestore(&hvcsd->lock, flags);
53660 return -ENODEV;
53661 }
53662@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53663 {
53664 struct hvcs_struct *hvcsd = tty->driver_data;
53665
53666- if (!hvcsd || hvcsd->port.count <= 0)
53667+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53668 return 0;
53669
53670 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53671diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53672index 4190199..06d5bfa 100644
53673--- a/drivers/tty/hvc/hvsi.c
53674+++ b/drivers/tty/hvc/hvsi.c
53675@@ -85,7 +85,7 @@ struct hvsi_struct {
53676 int n_outbuf;
53677 uint32_t vtermno;
53678 uint32_t virq;
53679- atomic_t seqno; /* HVSI packet sequence number */
53680+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53681 uint16_t mctrl;
53682 uint8_t state; /* HVSI protocol state */
53683 uint8_t flags;
53684@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53685
53686 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53687 packet.hdr.len = sizeof(struct hvsi_query_response);
53688- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53689+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53690 packet.verb = VSV_SEND_VERSION_NUMBER;
53691 packet.u.version = HVSI_VERSION;
53692 packet.query_seqno = query_seqno+1;
53693@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53694
53695 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53696 packet.hdr.len = sizeof(struct hvsi_query);
53697- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53698+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53699 packet.verb = verb;
53700
53701 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53702@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53703 int wrote;
53704
53705 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53706- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53707+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53708 packet.hdr.len = sizeof(struct hvsi_control);
53709 packet.verb = VSV_SET_MODEM_CTL;
53710 packet.mask = HVSI_TSDTR;
53711@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53712 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53713
53714 packet.hdr.type = VS_DATA_PACKET_HEADER;
53715- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53716+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53717 packet.hdr.len = count + sizeof(struct hvsi_header);
53718 memcpy(&packet.data, buf, count);
53719
53720@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53721 struct hvsi_control packet __ALIGNED__;
53722
53723 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53724- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53725+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53726 packet.hdr.len = 6;
53727 packet.verb = VSV_CLOSE_PROTOCOL;
53728
53729@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53730
53731 tty_port_tty_set(&hp->port, tty);
53732 spin_lock_irqsave(&hp->lock, flags);
53733- hp->port.count++;
53734+ atomic_inc(&hp->port.count);
53735 atomic_set(&hp->seqno, 0);
53736 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53737 spin_unlock_irqrestore(&hp->lock, flags);
53738@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53739
53740 spin_lock_irqsave(&hp->lock, flags);
53741
53742- if (--hp->port.count == 0) {
53743+ if (atomic_dec_return(&hp->port.count) == 0) {
53744 tty_port_tty_set(&hp->port, NULL);
53745 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53746
53747@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53748
53749 spin_lock_irqsave(&hp->lock, flags);
53750 }
53751- } else if (hp->port.count < 0)
53752+ } else if (atomic_read(&hp->port.count) < 0)
53753 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53754- hp - hvsi_ports, hp->port.count);
53755+ hp - hvsi_ports, atomic_read(&hp->port.count));
53756
53757 spin_unlock_irqrestore(&hp->lock, flags);
53758 }
53759@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53760 tty_port_tty_set(&hp->port, NULL);
53761
53762 spin_lock_irqsave(&hp->lock, flags);
53763- hp->port.count = 0;
53764+ atomic_set(&hp->port.count, 0);
53765 hp->n_outbuf = 0;
53766 spin_unlock_irqrestore(&hp->lock, flags);
53767 }
53768diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53769index 7ae6c29..05c6dba 100644
53770--- a/drivers/tty/hvc/hvsi_lib.c
53771+++ b/drivers/tty/hvc/hvsi_lib.c
53772@@ -8,7 +8,7 @@
53773
53774 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53775 {
53776- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53777+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53778
53779 /* Assumes that always succeeds, works in practice */
53780 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53781@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53782
53783 /* Reset state */
53784 pv->established = 0;
53785- atomic_set(&pv->seqno, 0);
53786+ atomic_set_unchecked(&pv->seqno, 0);
53787
53788 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53789
53790diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53791index 345cebb..d5a1e9e 100644
53792--- a/drivers/tty/ipwireless/tty.c
53793+++ b/drivers/tty/ipwireless/tty.c
53794@@ -28,6 +28,7 @@
53795 #include <linux/tty_driver.h>
53796 #include <linux/tty_flip.h>
53797 #include <linux/uaccess.h>
53798+#include <asm/local.h>
53799
53800 #include "tty.h"
53801 #include "network.h"
53802@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53803 return -ENODEV;
53804
53805 mutex_lock(&tty->ipw_tty_mutex);
53806- if (tty->port.count == 0)
53807+ if (atomic_read(&tty->port.count) == 0)
53808 tty->tx_bytes_queued = 0;
53809
53810- tty->port.count++;
53811+ atomic_inc(&tty->port.count);
53812
53813 tty->port.tty = linux_tty;
53814 linux_tty->driver_data = tty;
53815@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53816
53817 static void do_ipw_close(struct ipw_tty *tty)
53818 {
53819- tty->port.count--;
53820-
53821- if (tty->port.count == 0) {
53822+ if (atomic_dec_return(&tty->port.count) == 0) {
53823 struct tty_struct *linux_tty = tty->port.tty;
53824
53825 if (linux_tty != NULL) {
53826@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53827 return;
53828
53829 mutex_lock(&tty->ipw_tty_mutex);
53830- if (tty->port.count == 0) {
53831+ if (atomic_read(&tty->port.count) == 0) {
53832 mutex_unlock(&tty->ipw_tty_mutex);
53833 return;
53834 }
53835@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53836
53837 mutex_lock(&tty->ipw_tty_mutex);
53838
53839- if (!tty->port.count) {
53840+ if (!atomic_read(&tty->port.count)) {
53841 mutex_unlock(&tty->ipw_tty_mutex);
53842 return;
53843 }
53844@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53845 return -ENODEV;
53846
53847 mutex_lock(&tty->ipw_tty_mutex);
53848- if (!tty->port.count) {
53849+ if (!atomic_read(&tty->port.count)) {
53850 mutex_unlock(&tty->ipw_tty_mutex);
53851 return -EINVAL;
53852 }
53853@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53854 if (!tty)
53855 return -ENODEV;
53856
53857- if (!tty->port.count)
53858+ if (!atomic_read(&tty->port.count))
53859 return -EINVAL;
53860
53861 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53862@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53863 if (!tty)
53864 return 0;
53865
53866- if (!tty->port.count)
53867+ if (!atomic_read(&tty->port.count))
53868 return 0;
53869
53870 return tty->tx_bytes_queued;
53871@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53872 if (!tty)
53873 return -ENODEV;
53874
53875- if (!tty->port.count)
53876+ if (!atomic_read(&tty->port.count))
53877 return -EINVAL;
53878
53879 return get_control_lines(tty);
53880@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53881 if (!tty)
53882 return -ENODEV;
53883
53884- if (!tty->port.count)
53885+ if (!atomic_read(&tty->port.count))
53886 return -EINVAL;
53887
53888 return set_control_lines(tty, set, clear);
53889@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53890 if (!tty)
53891 return -ENODEV;
53892
53893- if (!tty->port.count)
53894+ if (!atomic_read(&tty->port.count))
53895 return -EINVAL;
53896
53897 /* FIXME: Exactly how is the tty object locked here .. */
53898@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53899 * are gone */
53900 mutex_lock(&ttyj->ipw_tty_mutex);
53901 }
53902- while (ttyj->port.count)
53903+ while (atomic_read(&ttyj->port.count))
53904 do_ipw_close(ttyj);
53905 ipwireless_disassociate_network_ttys(network,
53906 ttyj->channel_idx);
53907diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53908index 1deaca4..c8582d4 100644
53909--- a/drivers/tty/moxa.c
53910+++ b/drivers/tty/moxa.c
53911@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53912 }
53913
53914 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53915- ch->port.count++;
53916+ atomic_inc(&ch->port.count);
53917 tty->driver_data = ch;
53918 tty_port_tty_set(&ch->port, tty);
53919 mutex_lock(&ch->port.mutex);
53920diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53921index c434376..114ce13 100644
53922--- a/drivers/tty/n_gsm.c
53923+++ b/drivers/tty/n_gsm.c
53924@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53925 spin_lock_init(&dlci->lock);
53926 mutex_init(&dlci->mutex);
53927 dlci->fifo = &dlci->_fifo;
53928- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53929+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53930 kfree(dlci);
53931 return NULL;
53932 }
53933@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53934 struct gsm_dlci *dlci = tty->driver_data;
53935 struct tty_port *port = &dlci->port;
53936
53937- port->count++;
53938+ atomic_inc(&port->count);
53939 tty_port_tty_set(port, tty);
53940
53941 dlci->modem_rx = 0;
53942diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53943index f44f1ba..a8d5915 100644
53944--- a/drivers/tty/n_tty.c
53945+++ b/drivers/tty/n_tty.c
53946@@ -115,7 +115,7 @@ struct n_tty_data {
53947 int minimum_to_wake;
53948
53949 /* consumer-published */
53950- size_t read_tail;
53951+ size_t read_tail __intentional_overflow(-1);
53952 size_t line_start;
53953
53954 /* protected by output lock */
53955@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53956 {
53957 *ops = tty_ldisc_N_TTY;
53958 ops->owner = NULL;
53959- ops->refcount = ops->flags = 0;
53960+ atomic_set(&ops->refcount, 0);
53961+ ops->flags = 0;
53962 }
53963 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
53964diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53965index 9bbdb1d..dc514ee 100644
53966--- a/drivers/tty/pty.c
53967+++ b/drivers/tty/pty.c
53968@@ -789,8 +789,10 @@ static void __init unix98_pty_init(void)
53969 panic("Couldn't register Unix98 pts driver");
53970
53971 /* Now create the /dev/ptmx special device */
53972+ pax_open_kernel();
53973 tty_default_fops(&ptmx_fops);
53974- ptmx_fops.open = ptmx_open;
53975+ *(void **)&ptmx_fops.open = ptmx_open;
53976+ pax_close_kernel();
53977
53978 cdev_init(&ptmx_cdev, &ptmx_fops);
53979 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53980diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53981index 383c4c7..d408e21 100644
53982--- a/drivers/tty/rocket.c
53983+++ b/drivers/tty/rocket.c
53984@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53985 tty->driver_data = info;
53986 tty_port_tty_set(port, tty);
53987
53988- if (port->count++ == 0) {
53989+ if (atomic_inc_return(&port->count) == 1) {
53990 atomic_inc(&rp_num_ports_open);
53991
53992 #ifdef ROCKET_DEBUG_OPEN
53993@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53994 #endif
53995 }
53996 #ifdef ROCKET_DEBUG_OPEN
53997- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53998+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53999 #endif
54000
54001 /*
54002@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54003 spin_unlock_irqrestore(&info->port.lock, flags);
54004 return;
54005 }
54006- if (info->port.count)
54007+ if (atomic_read(&info->port.count))
54008 atomic_dec(&rp_num_ports_open);
54009 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54010 spin_unlock_irqrestore(&info->port.lock, flags);
54011diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54012index aa28209..e08fb85 100644
54013--- a/drivers/tty/serial/ioc4_serial.c
54014+++ b/drivers/tty/serial/ioc4_serial.c
54015@@ -437,7 +437,7 @@ struct ioc4_soft {
54016 } is_intr_info[MAX_IOC4_INTR_ENTS];
54017
54018 /* Number of entries active in the above array */
54019- atomic_t is_num_intrs;
54020+ atomic_unchecked_t is_num_intrs;
54021 } is_intr_type[IOC4_NUM_INTR_TYPES];
54022
54023 /* is_ir_lock must be held while
54024@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54025 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54026 || (type == IOC4_OTHER_INTR_TYPE)));
54027
54028- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54029+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54030 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54031
54032 /* Save off the lower level interrupt handler */
54033@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54034
54035 soft = arg;
54036 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54037- num_intrs = (int)atomic_read(
54038+ num_intrs = (int)atomic_read_unchecked(
54039 &soft->is_intr_type[intr_type].is_num_intrs);
54040
54041 this_mir = this_ir = pending_intrs(soft, intr_type);
54042diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54043index 6ec7501..265bcbf 100644
54044--- a/drivers/tty/serial/kgdb_nmi.c
54045+++ b/drivers/tty/serial/kgdb_nmi.c
54046@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54047 * I/O utilities that messages sent to the console will automatically
54048 * be displayed on the dbg_io.
54049 */
54050- dbg_io_ops->is_console = true;
54051+ pax_open_kernel();
54052+ *(int *)&dbg_io_ops->is_console = true;
54053+ pax_close_kernel();
54054
54055 return 0;
54056 }
54057diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54058index a260cde..6b2b5ce 100644
54059--- a/drivers/tty/serial/kgdboc.c
54060+++ b/drivers/tty/serial/kgdboc.c
54061@@ -24,8 +24,9 @@
54062 #define MAX_CONFIG_LEN 40
54063
54064 static struct kgdb_io kgdboc_io_ops;
54065+static struct kgdb_io kgdboc_io_ops_console;
54066
54067-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54068+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54069 static int configured = -1;
54070
54071 static char config[MAX_CONFIG_LEN];
54072@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54073 kgdboc_unregister_kbd();
54074 if (configured == 1)
54075 kgdb_unregister_io_module(&kgdboc_io_ops);
54076+ else if (configured == 2)
54077+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54078 }
54079
54080 static int configure_kgdboc(void)
54081@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54082 int err;
54083 char *cptr = config;
54084 struct console *cons;
54085+ int is_console = 0;
54086
54087 err = kgdboc_option_setup(config);
54088 if (err || !strlen(config) || isspace(config[0]))
54089 goto noconfig;
54090
54091 err = -ENODEV;
54092- kgdboc_io_ops.is_console = 0;
54093 kgdb_tty_driver = NULL;
54094
54095 kgdboc_use_kms = 0;
54096@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54097 int idx;
54098 if (cons->device && cons->device(cons, &idx) == p &&
54099 idx == tty_line) {
54100- kgdboc_io_ops.is_console = 1;
54101+ is_console = 1;
54102 break;
54103 }
54104 cons = cons->next;
54105@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54106 kgdb_tty_line = tty_line;
54107
54108 do_register:
54109- err = kgdb_register_io_module(&kgdboc_io_ops);
54110+ if (is_console) {
54111+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54112+ configured = 2;
54113+ } else {
54114+ err = kgdb_register_io_module(&kgdboc_io_ops);
54115+ configured = 1;
54116+ }
54117 if (err)
54118 goto noconfig;
54119
54120@@ -205,8 +214,6 @@ do_register:
54121 if (err)
54122 goto nmi_con_failed;
54123
54124- configured = 1;
54125-
54126 return 0;
54127
54128 nmi_con_failed:
54129@@ -223,7 +230,7 @@ noconfig:
54130 static int __init init_kgdboc(void)
54131 {
54132 /* Already configured? */
54133- if (configured == 1)
54134+ if (configured >= 1)
54135 return 0;
54136
54137 return configure_kgdboc();
54138@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54139 if (config[len - 1] == '\n')
54140 config[len - 1] = '\0';
54141
54142- if (configured == 1)
54143+ if (configured >= 1)
54144 cleanup_kgdboc();
54145
54146 /* Go and configure with the new params. */
54147@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54148 .post_exception = kgdboc_post_exp_handler,
54149 };
54150
54151+static struct kgdb_io kgdboc_io_ops_console = {
54152+ .name = "kgdboc",
54153+ .read_char = kgdboc_get_char,
54154+ .write_char = kgdboc_put_char,
54155+ .pre_exception = kgdboc_pre_exp_handler,
54156+ .post_exception = kgdboc_post_exp_handler,
54157+ .is_console = 1
54158+};
54159+
54160 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54161 /* This is only available if kgdboc is a built in for early debugging */
54162 static int __init kgdboc_early_init(char *opt)
54163diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54164index 0da0b54..80ae306 100644
54165--- a/drivers/tty/serial/msm_serial.c
54166+++ b/drivers/tty/serial/msm_serial.c
54167@@ -989,7 +989,7 @@ static struct uart_driver msm_uart_driver = {
54168 .cons = MSM_CONSOLE,
54169 };
54170
54171-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54172+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54173
54174 static const struct of_device_id msm_uartdm_table[] = {
54175 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54176@@ -1008,7 +1008,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54177 int irq;
54178
54179 if (pdev->id == -1)
54180- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
54181+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54182
54183 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
54184 return -ENXIO;
54185diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54186index c78f43a..22b1dab 100644
54187--- a/drivers/tty/serial/samsung.c
54188+++ b/drivers/tty/serial/samsung.c
54189@@ -478,11 +478,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54190 }
54191 }
54192
54193+static int s3c64xx_serial_startup(struct uart_port *port);
54194 static int s3c24xx_serial_startup(struct uart_port *port)
54195 {
54196 struct s3c24xx_uart_port *ourport = to_ourport(port);
54197 int ret;
54198
54199+ /* Startup sequence is different for s3c64xx and higher SoC's */
54200+ if (s3c24xx_serial_has_interrupt_mask(port))
54201+ return s3c64xx_serial_startup(port);
54202+
54203 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54204 port, (unsigned long long)port->mapbase, port->membase);
54205
54206@@ -1155,10 +1160,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54207 /* setup info for port */
54208 port->dev = &platdev->dev;
54209
54210- /* Startup sequence is different for s3c64xx and higher SoC's */
54211- if (s3c24xx_serial_has_interrupt_mask(port))
54212- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54213-
54214 port->uartclk = 1;
54215
54216 if (cfg->uart_flags & UPF_CONS_FLOW) {
54217diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54218index 29a7be4..0144e62 100644
54219--- a/drivers/tty/serial/serial_core.c
54220+++ b/drivers/tty/serial/serial_core.c
54221@@ -1343,7 +1343,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54222
54223 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54224
54225- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54226+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54227 return;
54228
54229 /*
54230@@ -1470,7 +1470,7 @@ static void uart_hangup(struct tty_struct *tty)
54231 uart_flush_buffer(tty);
54232 uart_shutdown(tty, state);
54233 spin_lock_irqsave(&port->lock, flags);
54234- port->count = 0;
54235+ atomic_set(&port->count, 0);
54236 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54237 spin_unlock_irqrestore(&port->lock, flags);
54238 tty_port_tty_set(port, NULL);
54239@@ -1568,7 +1568,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54240 goto end;
54241 }
54242
54243- port->count++;
54244+ atomic_inc(&port->count);
54245 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
54246 retval = -ENXIO;
54247 goto err_dec_count;
54248@@ -1600,7 +1600,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54249 end:
54250 return retval;
54251 err_dec_count:
54252- port->count--;
54253+ atomic_dec(&port->count);
54254 mutex_unlock(&port->mutex);
54255 goto end;
54256 }
54257diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54258index b799170..87dafd5 100644
54259--- a/drivers/tty/synclink.c
54260+++ b/drivers/tty/synclink.c
54261@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54262
54263 if (debug_level >= DEBUG_LEVEL_INFO)
54264 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54265- __FILE__,__LINE__, info->device_name, info->port.count);
54266+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54267
54268 if (tty_port_close_start(&info->port, tty, filp) == 0)
54269 goto cleanup;
54270@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54271 cleanup:
54272 if (debug_level >= DEBUG_LEVEL_INFO)
54273 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54274- tty->driver->name, info->port.count);
54275+ tty->driver->name, atomic_read(&info->port.count));
54276
54277 } /* end of mgsl_close() */
54278
54279@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54280
54281 mgsl_flush_buffer(tty);
54282 shutdown(info);
54283-
54284- info->port.count = 0;
54285+
54286+ atomic_set(&info->port.count, 0);
54287 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54288 info->port.tty = NULL;
54289
54290@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54291
54292 if (debug_level >= DEBUG_LEVEL_INFO)
54293 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54294- __FILE__,__LINE__, tty->driver->name, port->count );
54295+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54296
54297 spin_lock_irqsave(&info->irq_spinlock, flags);
54298- port->count--;
54299+ atomic_dec(&port->count);
54300 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54301 port->blocked_open++;
54302
54303@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54304
54305 if (debug_level >= DEBUG_LEVEL_INFO)
54306 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54307- __FILE__,__LINE__, tty->driver->name, port->count );
54308+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54309
54310 tty_unlock(tty);
54311 schedule();
54312@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54313
54314 /* FIXME: Racy on hangup during close wait */
54315 if (!tty_hung_up_p(filp))
54316- port->count++;
54317+ atomic_inc(&port->count);
54318 port->blocked_open--;
54319
54320 if (debug_level >= DEBUG_LEVEL_INFO)
54321 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54322- __FILE__,__LINE__, tty->driver->name, port->count );
54323+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54324
54325 if (!retval)
54326 port->flags |= ASYNC_NORMAL_ACTIVE;
54327@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54328
54329 if (debug_level >= DEBUG_LEVEL_INFO)
54330 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54331- __FILE__,__LINE__,tty->driver->name, info->port.count);
54332+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54333
54334 /* If port is closing, signal caller to try again */
54335 if (info->port.flags & ASYNC_CLOSING){
54336@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54337 spin_unlock_irqrestore(&info->netlock, flags);
54338 goto cleanup;
54339 }
54340- info->port.count++;
54341+ atomic_inc(&info->port.count);
54342 spin_unlock_irqrestore(&info->netlock, flags);
54343
54344- if (info->port.count == 1) {
54345+ if (atomic_read(&info->port.count) == 1) {
54346 /* 1st open on this device, init hardware */
54347 retval = startup(info);
54348 if (retval < 0)
54349@@ -3442,8 +3442,8 @@ cleanup:
54350 if (retval) {
54351 if (tty->count == 1)
54352 info->port.tty = NULL; /* tty layer will release tty struct */
54353- if(info->port.count)
54354- info->port.count--;
54355+ if (atomic_read(&info->port.count))
54356+ atomic_dec(&info->port.count);
54357 }
54358
54359 return retval;
54360@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54361 unsigned short new_crctype;
54362
54363 /* return error if TTY interface open */
54364- if (info->port.count)
54365+ if (atomic_read(&info->port.count))
54366 return -EBUSY;
54367
54368 switch (encoding)
54369@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54370
54371 /* arbitrate between network and tty opens */
54372 spin_lock_irqsave(&info->netlock, flags);
54373- if (info->port.count != 0 || info->netcount != 0) {
54374+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54375 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54376 spin_unlock_irqrestore(&info->netlock, flags);
54377 return -EBUSY;
54378@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54379 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54380
54381 /* return error if TTY interface open */
54382- if (info->port.count)
54383+ if (atomic_read(&info->port.count))
54384 return -EBUSY;
54385
54386 if (cmd != SIOCWANDEV)
54387diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54388index 0e8c39b..e0cb171 100644
54389--- a/drivers/tty/synclink_gt.c
54390+++ b/drivers/tty/synclink_gt.c
54391@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54392 tty->driver_data = info;
54393 info->port.tty = tty;
54394
54395- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54396+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54397
54398 /* If port is closing, signal caller to try again */
54399 if (info->port.flags & ASYNC_CLOSING){
54400@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54401 mutex_unlock(&info->port.mutex);
54402 goto cleanup;
54403 }
54404- info->port.count++;
54405+ atomic_inc(&info->port.count);
54406 spin_unlock_irqrestore(&info->netlock, flags);
54407
54408- if (info->port.count == 1) {
54409+ if (atomic_read(&info->port.count) == 1) {
54410 /* 1st open on this device, init hardware */
54411 retval = startup(info);
54412 if (retval < 0) {
54413@@ -715,8 +715,8 @@ cleanup:
54414 if (retval) {
54415 if (tty->count == 1)
54416 info->port.tty = NULL; /* tty layer will release tty struct */
54417- if(info->port.count)
54418- info->port.count--;
54419+ if(atomic_read(&info->port.count))
54420+ atomic_dec(&info->port.count);
54421 }
54422
54423 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54424@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54425
54426 if (sanity_check(info, tty->name, "close"))
54427 return;
54428- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54429+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54430
54431 if (tty_port_close_start(&info->port, tty, filp) == 0)
54432 goto cleanup;
54433@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54434 tty_port_close_end(&info->port, tty);
54435 info->port.tty = NULL;
54436 cleanup:
54437- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54438+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54439 }
54440
54441 static void hangup(struct tty_struct *tty)
54442@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54443 shutdown(info);
54444
54445 spin_lock_irqsave(&info->port.lock, flags);
54446- info->port.count = 0;
54447+ atomic_set(&info->port.count, 0);
54448 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54449 info->port.tty = NULL;
54450 spin_unlock_irqrestore(&info->port.lock, flags);
54451@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54452 unsigned short new_crctype;
54453
54454 /* return error if TTY interface open */
54455- if (info->port.count)
54456+ if (atomic_read(&info->port.count))
54457 return -EBUSY;
54458
54459 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54460@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54461
54462 /* arbitrate between network and tty opens */
54463 spin_lock_irqsave(&info->netlock, flags);
54464- if (info->port.count != 0 || info->netcount != 0) {
54465+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54466 DBGINFO(("%s hdlc_open busy\n", dev->name));
54467 spin_unlock_irqrestore(&info->netlock, flags);
54468 return -EBUSY;
54469@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54470 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54471
54472 /* return error if TTY interface open */
54473- if (info->port.count)
54474+ if (atomic_read(&info->port.count))
54475 return -EBUSY;
54476
54477 if (cmd != SIOCWANDEV)
54478@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54479 if (port == NULL)
54480 continue;
54481 spin_lock(&port->lock);
54482- if ((port->port.count || port->netcount) &&
54483+ if ((atomic_read(&port->port.count) || port->netcount) &&
54484 port->pending_bh && !port->bh_running &&
54485 !port->bh_requested) {
54486 DBGISR(("%s bh queued\n", port->device_name));
54487@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54488 add_wait_queue(&port->open_wait, &wait);
54489
54490 spin_lock_irqsave(&info->lock, flags);
54491- port->count--;
54492+ atomic_dec(&port->count);
54493 spin_unlock_irqrestore(&info->lock, flags);
54494 port->blocked_open++;
54495
54496@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54497 remove_wait_queue(&port->open_wait, &wait);
54498
54499 if (!tty_hung_up_p(filp))
54500- port->count++;
54501+ atomic_inc(&port->count);
54502 port->blocked_open--;
54503
54504 if (!retval)
54505diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54506index c3f9091..abe4601 100644
54507--- a/drivers/tty/synclinkmp.c
54508+++ b/drivers/tty/synclinkmp.c
54509@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54510
54511 if (debug_level >= DEBUG_LEVEL_INFO)
54512 printk("%s(%d):%s open(), old ref count = %d\n",
54513- __FILE__,__LINE__,tty->driver->name, info->port.count);
54514+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54515
54516 /* If port is closing, signal caller to try again */
54517 if (info->port.flags & ASYNC_CLOSING){
54518@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54519 spin_unlock_irqrestore(&info->netlock, flags);
54520 goto cleanup;
54521 }
54522- info->port.count++;
54523+ atomic_inc(&info->port.count);
54524 spin_unlock_irqrestore(&info->netlock, flags);
54525
54526- if (info->port.count == 1) {
54527+ if (atomic_read(&info->port.count) == 1) {
54528 /* 1st open on this device, init hardware */
54529 retval = startup(info);
54530 if (retval < 0)
54531@@ -796,8 +796,8 @@ cleanup:
54532 if (retval) {
54533 if (tty->count == 1)
54534 info->port.tty = NULL; /* tty layer will release tty struct */
54535- if(info->port.count)
54536- info->port.count--;
54537+ if(atomic_read(&info->port.count))
54538+ atomic_dec(&info->port.count);
54539 }
54540
54541 return retval;
54542@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54543
54544 if (debug_level >= DEBUG_LEVEL_INFO)
54545 printk("%s(%d):%s close() entry, count=%d\n",
54546- __FILE__,__LINE__, info->device_name, info->port.count);
54547+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54548
54549 if (tty_port_close_start(&info->port, tty, filp) == 0)
54550 goto cleanup;
54551@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54552 cleanup:
54553 if (debug_level >= DEBUG_LEVEL_INFO)
54554 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54555- tty->driver->name, info->port.count);
54556+ tty->driver->name, atomic_read(&info->port.count));
54557 }
54558
54559 /* Called by tty_hangup() when a hangup is signaled.
54560@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54561 shutdown(info);
54562
54563 spin_lock_irqsave(&info->port.lock, flags);
54564- info->port.count = 0;
54565+ atomic_set(&info->port.count, 0);
54566 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54567 info->port.tty = NULL;
54568 spin_unlock_irqrestore(&info->port.lock, flags);
54569@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54570 unsigned short new_crctype;
54571
54572 /* return error if TTY interface open */
54573- if (info->port.count)
54574+ if (atomic_read(&info->port.count))
54575 return -EBUSY;
54576
54577 switch (encoding)
54578@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54579
54580 /* arbitrate between network and tty opens */
54581 spin_lock_irqsave(&info->netlock, flags);
54582- if (info->port.count != 0 || info->netcount != 0) {
54583+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54584 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54585 spin_unlock_irqrestore(&info->netlock, flags);
54586 return -EBUSY;
54587@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54588 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54589
54590 /* return error if TTY interface open */
54591- if (info->port.count)
54592+ if (atomic_read(&info->port.count))
54593 return -EBUSY;
54594
54595 if (cmd != SIOCWANDEV)
54596@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54597 * do not request bottom half processing if the
54598 * device is not open in a normal mode.
54599 */
54600- if ( port && (port->port.count || port->netcount) &&
54601+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54602 port->pending_bh && !port->bh_running &&
54603 !port->bh_requested ) {
54604 if ( debug_level >= DEBUG_LEVEL_ISR )
54605@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54606
54607 if (debug_level >= DEBUG_LEVEL_INFO)
54608 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54609- __FILE__,__LINE__, tty->driver->name, port->count );
54610+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54611
54612 spin_lock_irqsave(&info->lock, flags);
54613- port->count--;
54614+ atomic_dec(&port->count);
54615 spin_unlock_irqrestore(&info->lock, flags);
54616 port->blocked_open++;
54617
54618@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54619
54620 if (debug_level >= DEBUG_LEVEL_INFO)
54621 printk("%s(%d):%s block_til_ready() count=%d\n",
54622- __FILE__,__LINE__, tty->driver->name, port->count );
54623+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54624
54625 tty_unlock(tty);
54626 schedule();
54627@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54628 set_current_state(TASK_RUNNING);
54629 remove_wait_queue(&port->open_wait, &wait);
54630 if (!tty_hung_up_p(filp))
54631- port->count++;
54632+ atomic_inc(&port->count);
54633 port->blocked_open--;
54634
54635 if (debug_level >= DEBUG_LEVEL_INFO)
54636 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54637- __FILE__,__LINE__, tty->driver->name, port->count );
54638+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54639
54640 if (!retval)
54641 port->flags |= ASYNC_NORMAL_ACTIVE;
54642diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54643index 42bad18..447d7a2 100644
54644--- a/drivers/tty/sysrq.c
54645+++ b/drivers/tty/sysrq.c
54646@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54647 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54648 size_t count, loff_t *ppos)
54649 {
54650- if (count) {
54651+ if (count && capable(CAP_SYS_ADMIN)) {
54652 char c;
54653
54654 if (get_user(c, buf))
54655diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54656index 8fbad34..0db0a39 100644
54657--- a/drivers/tty/tty_io.c
54658+++ b/drivers/tty/tty_io.c
54659@@ -3464,7 +3464,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
54660
54661 void tty_default_fops(struct file_operations *fops)
54662 {
54663- *fops = tty_fops;
54664+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54665 }
54666
54667 /*
54668diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54669index 2d822aa..a566234 100644
54670--- a/drivers/tty/tty_ldisc.c
54671+++ b/drivers/tty/tty_ldisc.c
54672@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54673 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54674 tty_ldiscs[disc] = new_ldisc;
54675 new_ldisc->num = disc;
54676- new_ldisc->refcount = 0;
54677+ atomic_set(&new_ldisc->refcount, 0);
54678 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54679
54680 return ret;
54681@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54682 return -EINVAL;
54683
54684 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54685- if (tty_ldiscs[disc]->refcount)
54686+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54687 ret = -EBUSY;
54688 else
54689 tty_ldiscs[disc] = NULL;
54690@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54691 if (ldops) {
54692 ret = ERR_PTR(-EAGAIN);
54693 if (try_module_get(ldops->owner)) {
54694- ldops->refcount++;
54695+ atomic_inc(&ldops->refcount);
54696 ret = ldops;
54697 }
54698 }
54699@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54700 unsigned long flags;
54701
54702 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54703- ldops->refcount--;
54704+ atomic_dec(&ldops->refcount);
54705 module_put(ldops->owner);
54706 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54707 }
54708diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54709index 1b93357..ea9f82c 100644
54710--- a/drivers/tty/tty_port.c
54711+++ b/drivers/tty/tty_port.c
54712@@ -237,7 +237,7 @@ void tty_port_hangup(struct tty_port *port)
54713 unsigned long flags;
54714
54715 spin_lock_irqsave(&port->lock, flags);
54716- port->count = 0;
54717+ atomic_set(&port->count, 0);
54718 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54719 tty = port->tty;
54720 if (tty)
54721@@ -399,7 +399,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54722
54723 /* The port lock protects the port counts */
54724 spin_lock_irqsave(&port->lock, flags);
54725- port->count--;
54726+ atomic_dec(&port->count);
54727 port->blocked_open++;
54728 spin_unlock_irqrestore(&port->lock, flags);
54729
54730@@ -441,7 +441,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54731 we must not mess that up further */
54732 spin_lock_irqsave(&port->lock, flags);
54733 if (!tty_hung_up_p(filp))
54734- port->count++;
54735+ atomic_inc(&port->count);
54736 port->blocked_open--;
54737 if (retval == 0)
54738 port->flags |= ASYNC_NORMAL_ACTIVE;
54739@@ -479,19 +479,19 @@ int tty_port_close_start(struct tty_port *port,
54740 return 0;
54741 }
54742
54743- if (tty->count == 1 && port->count != 1) {
54744+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54745 printk(KERN_WARNING
54746 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54747- port->count);
54748- port->count = 1;
54749+ atomic_read(&port->count));
54750+ atomic_set(&port->count, 1);
54751 }
54752- if (--port->count < 0) {
54753+ if (atomic_dec_return(&port->count) < 0) {
54754 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54755- port->count);
54756- port->count = 0;
54757+ atomic_read(&port->count));
54758+ atomic_set(&port->count, 0);
54759 }
54760
54761- if (port->count) {
54762+ if (atomic_read(&port->count)) {
54763 spin_unlock_irqrestore(&port->lock, flags);
54764 return 0;
54765 }
54766@@ -592,7 +592,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54767 struct file *filp)
54768 {
54769 spin_lock_irq(&port->lock);
54770- ++port->count;
54771+ atomic_inc(&port->count);
54772 spin_unlock_irq(&port->lock);
54773 tty_port_tty_set(port, tty);
54774
54775diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54776index d0e3a44..5f8b754 100644
54777--- a/drivers/tty/vt/keyboard.c
54778+++ b/drivers/tty/vt/keyboard.c
54779@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54780 kbd->kbdmode == VC_OFF) &&
54781 value != KVAL(K_SAK))
54782 return; /* SAK is allowed even in raw mode */
54783+
54784+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54785+ {
54786+ void *func = fn_handler[value];
54787+ if (func == fn_show_state || func == fn_show_ptregs ||
54788+ func == fn_show_mem)
54789+ return;
54790+ }
54791+#endif
54792+
54793 fn_handler[value](vc);
54794 }
54795
54796@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54797 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54798 return -EFAULT;
54799
54800- if (!capable(CAP_SYS_TTY_CONFIG))
54801- perm = 0;
54802-
54803 switch (cmd) {
54804 case KDGKBENT:
54805 /* Ensure another thread doesn't free it under us */
54806@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54807 spin_unlock_irqrestore(&kbd_event_lock, flags);
54808 return put_user(val, &user_kbe->kb_value);
54809 case KDSKBENT:
54810+ if (!capable(CAP_SYS_TTY_CONFIG))
54811+ perm = 0;
54812+
54813 if (!perm)
54814 return -EPERM;
54815 if (!i && v == K_NOSUCHMAP) {
54816@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54817 int i, j, k;
54818 int ret;
54819
54820- if (!capable(CAP_SYS_TTY_CONFIG))
54821- perm = 0;
54822-
54823 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54824 if (!kbs) {
54825 ret = -ENOMEM;
54826@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54827 kfree(kbs);
54828 return ((p && *p) ? -EOVERFLOW : 0);
54829 case KDSKBSENT:
54830+ if (!capable(CAP_SYS_TTY_CONFIG))
54831+ perm = 0;
54832+
54833 if (!perm) {
54834 ret = -EPERM;
54835 goto reterr;
54836diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54837index a673e5b..36e5d32 100644
54838--- a/drivers/uio/uio.c
54839+++ b/drivers/uio/uio.c
54840@@ -25,6 +25,7 @@
54841 #include <linux/kobject.h>
54842 #include <linux/cdev.h>
54843 #include <linux/uio_driver.h>
54844+#include <asm/local.h>
54845
54846 #define UIO_MAX_DEVICES (1U << MINORBITS)
54847
54848@@ -32,7 +33,7 @@ struct uio_device {
54849 struct module *owner;
54850 struct device *dev;
54851 int minor;
54852- atomic_t event;
54853+ atomic_unchecked_t event;
54854 struct fasync_struct *async_queue;
54855 wait_queue_head_t wait;
54856 struct uio_info *info;
54857@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
54858 struct device_attribute *attr, char *buf)
54859 {
54860 struct uio_device *idev = dev_get_drvdata(dev);
54861- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54862+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54863 }
54864 static DEVICE_ATTR_RO(event);
54865
54866@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
54867 {
54868 struct uio_device *idev = info->uio_dev;
54869
54870- atomic_inc(&idev->event);
54871+ atomic_inc_unchecked(&idev->event);
54872 wake_up_interruptible(&idev->wait);
54873 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54874 }
54875@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54876 }
54877
54878 listener->dev = idev;
54879- listener->event_count = atomic_read(&idev->event);
54880+ listener->event_count = atomic_read_unchecked(&idev->event);
54881 filep->private_data = listener;
54882
54883 if (idev->info->open) {
54884@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54885 return -EIO;
54886
54887 poll_wait(filep, &idev->wait, wait);
54888- if (listener->event_count != atomic_read(&idev->event))
54889+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54890 return POLLIN | POLLRDNORM;
54891 return 0;
54892 }
54893@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54894 do {
54895 set_current_state(TASK_INTERRUPTIBLE);
54896
54897- event_count = atomic_read(&idev->event);
54898+ event_count = atomic_read_unchecked(&idev->event);
54899 if (event_count != listener->event_count) {
54900 if (copy_to_user(buf, &event_count, count))
54901 retval = -EFAULT;
54902@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54903 static int uio_find_mem_index(struct vm_area_struct *vma)
54904 {
54905 struct uio_device *idev = vma->vm_private_data;
54906+ unsigned long size;
54907
54908 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54909- if (idev->info->mem[vma->vm_pgoff].size == 0)
54910+ size = idev->info->mem[vma->vm_pgoff].size;
54911+ if (size == 0)
54912+ return -1;
54913+ if (vma->vm_end - vma->vm_start > size)
54914 return -1;
54915 return (int)vma->vm_pgoff;
54916 }
54917@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
54918 idev->owner = owner;
54919 idev->info = info;
54920 init_waitqueue_head(&idev->wait);
54921- atomic_set(&idev->event, 0);
54922+ atomic_set_unchecked(&idev->event, 0);
54923
54924 ret = uio_get_minor(idev);
54925 if (ret)
54926diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54927index 813d4d3..a71934f 100644
54928--- a/drivers/usb/atm/cxacru.c
54929+++ b/drivers/usb/atm/cxacru.c
54930@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54931 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54932 if (ret < 2)
54933 return -EINVAL;
54934- if (index < 0 || index > 0x7f)
54935+ if (index > 0x7f)
54936 return -EINVAL;
54937 pos += tmp;
54938
54939diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54940index dada014..1d0d517 100644
54941--- a/drivers/usb/atm/usbatm.c
54942+++ b/drivers/usb/atm/usbatm.c
54943@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54944 if (printk_ratelimit())
54945 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54946 __func__, vpi, vci);
54947- atomic_inc(&vcc->stats->rx_err);
54948+ atomic_inc_unchecked(&vcc->stats->rx_err);
54949 return;
54950 }
54951
54952@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54953 if (length > ATM_MAX_AAL5_PDU) {
54954 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54955 __func__, length, vcc);
54956- atomic_inc(&vcc->stats->rx_err);
54957+ atomic_inc_unchecked(&vcc->stats->rx_err);
54958 goto out;
54959 }
54960
54961@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54962 if (sarb->len < pdu_length) {
54963 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54964 __func__, pdu_length, sarb->len, vcc);
54965- atomic_inc(&vcc->stats->rx_err);
54966+ atomic_inc_unchecked(&vcc->stats->rx_err);
54967 goto out;
54968 }
54969
54970 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54971 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54972 __func__, vcc);
54973- atomic_inc(&vcc->stats->rx_err);
54974+ atomic_inc_unchecked(&vcc->stats->rx_err);
54975 goto out;
54976 }
54977
54978@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54979 if (printk_ratelimit())
54980 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54981 __func__, length);
54982- atomic_inc(&vcc->stats->rx_drop);
54983+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54984 goto out;
54985 }
54986
54987@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54988
54989 vcc->push(vcc, skb);
54990
54991- atomic_inc(&vcc->stats->rx);
54992+ atomic_inc_unchecked(&vcc->stats->rx);
54993 out:
54994 skb_trim(sarb, 0);
54995 }
54996@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54997 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54998
54999 usbatm_pop(vcc, skb);
55000- atomic_inc(&vcc->stats->tx);
55001+ atomic_inc_unchecked(&vcc->stats->tx);
55002
55003 skb = skb_dequeue(&instance->sndqueue);
55004 }
55005@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55006 if (!left--)
55007 return sprintf(page,
55008 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55009- atomic_read(&atm_dev->stats.aal5.tx),
55010- atomic_read(&atm_dev->stats.aal5.tx_err),
55011- atomic_read(&atm_dev->stats.aal5.rx),
55012- atomic_read(&atm_dev->stats.aal5.rx_err),
55013- atomic_read(&atm_dev->stats.aal5.rx_drop));
55014+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55015+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55016+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55017+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55018+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55019
55020 if (!left--) {
55021 if (instance->disconnected)
55022diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55023index 2a3bbdf..91d72cf 100644
55024--- a/drivers/usb/core/devices.c
55025+++ b/drivers/usb/core/devices.c
55026@@ -126,7 +126,7 @@ static const char format_endpt[] =
55027 * time it gets called.
55028 */
55029 static struct device_connect_event {
55030- atomic_t count;
55031+ atomic_unchecked_t count;
55032 wait_queue_head_t wait;
55033 } device_event = {
55034 .count = ATOMIC_INIT(1),
55035@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55036
55037 void usbfs_conn_disc_event(void)
55038 {
55039- atomic_add(2, &device_event.count);
55040+ atomic_add_unchecked(2, &device_event.count);
55041 wake_up(&device_event.wait);
55042 }
55043
55044@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55045
55046 poll_wait(file, &device_event.wait, wait);
55047
55048- event_count = atomic_read(&device_event.count);
55049+ event_count = atomic_read_unchecked(&device_event.count);
55050 if (file->f_version != event_count) {
55051 file->f_version = event_count;
55052 return POLLIN | POLLRDNORM;
55053diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55054index 0b59731..46ee7d1 100644
55055--- a/drivers/usb/core/devio.c
55056+++ b/drivers/usb/core/devio.c
55057@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55058 struct usb_dev_state *ps = file->private_data;
55059 struct usb_device *dev = ps->dev;
55060 ssize_t ret = 0;
55061- unsigned len;
55062+ size_t len;
55063 loff_t pos;
55064 int i;
55065
55066@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55067 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55068 struct usb_config_descriptor *config =
55069 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55070- unsigned int length = le16_to_cpu(config->wTotalLength);
55071+ size_t length = le16_to_cpu(config->wTotalLength);
55072
55073 if (*ppos < pos + length) {
55074
55075 /* The descriptor may claim to be longer than it
55076 * really is. Here is the actual allocated length. */
55077- unsigned alloclen =
55078+ size_t alloclen =
55079 le16_to_cpu(dev->config[i].desc.wTotalLength);
55080
55081- len = length - (*ppos - pos);
55082+ len = length + pos - *ppos;
55083 if (len > nbytes)
55084 len = nbytes;
55085
55086 /* Simply don't write (skip over) unallocated parts */
55087 if (alloclen > (*ppos - pos)) {
55088- alloclen -= (*ppos - pos);
55089+ alloclen = alloclen + pos - *ppos;
55090 if (copy_to_user(buf,
55091 dev->rawdescriptors[i] + (*ppos - pos),
55092 min(len, alloclen))) {
55093diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55094index 487abcf..06226dc 100644
55095--- a/drivers/usb/core/hcd.c
55096+++ b/drivers/usb/core/hcd.c
55097@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55098 */
55099 usb_get_urb(urb);
55100 atomic_inc(&urb->use_count);
55101- atomic_inc(&urb->dev->urbnum);
55102+ atomic_inc_unchecked(&urb->dev->urbnum);
55103 usbmon_urb_submit(&hcd->self, urb);
55104
55105 /* NOTE requirements on root-hub callers (usbfs and the hub
55106@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55107 urb->hcpriv = NULL;
55108 INIT_LIST_HEAD(&urb->urb_list);
55109 atomic_dec(&urb->use_count);
55110- atomic_dec(&urb->dev->urbnum);
55111+ atomic_dec_unchecked(&urb->dev->urbnum);
55112 if (atomic_read(&urb->reject))
55113 wake_up(&usb_kill_urb_queue);
55114 usb_put_urb(urb);
55115diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55116index dc84915..cdb6624 100644
55117--- a/drivers/usb/core/hub.c
55118+++ b/drivers/usb/core/hub.c
55119@@ -27,6 +27,7 @@
55120 #include <linux/freezer.h>
55121 #include <linux/random.h>
55122 #include <linux/pm_qos.h>
55123+#include <linux/grsecurity.h>
55124
55125 #include <asm/uaccess.h>
55126 #include <asm/byteorder.h>
55127@@ -4662,6 +4663,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55128 goto done;
55129 return;
55130 }
55131+
55132+ if (gr_handle_new_usb())
55133+ goto done;
55134+
55135 if (hub_is_superspeed(hub->hdev))
55136 unit_load = 150;
55137 else
55138diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55139index 0c8a7fc..c45b40a 100644
55140--- a/drivers/usb/core/message.c
55141+++ b/drivers/usb/core/message.c
55142@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55143 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55144 * error number.
55145 */
55146-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55147+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55148 __u8 requesttype, __u16 value, __u16 index, void *data,
55149 __u16 size, int timeout)
55150 {
55151@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55152 * If successful, 0. Otherwise a negative error number. The number of actual
55153 * bytes transferred will be stored in the @actual_length parameter.
55154 */
55155-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55156+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55157 void *data, int len, int *actual_length, int timeout)
55158 {
55159 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55160@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55161 * bytes transferred will be stored in the @actual_length parameter.
55162 *
55163 */
55164-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55165+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55166 void *data, int len, int *actual_length, int timeout)
55167 {
55168 struct urb *urb;
55169diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55170index 1236c60..d47a51c 100644
55171--- a/drivers/usb/core/sysfs.c
55172+++ b/drivers/usb/core/sysfs.c
55173@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55174 struct usb_device *udev;
55175
55176 udev = to_usb_device(dev);
55177- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55178+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55179 }
55180 static DEVICE_ATTR_RO(urbnum);
55181
55182diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55183index 2dd2362..1135437 100644
55184--- a/drivers/usb/core/usb.c
55185+++ b/drivers/usb/core/usb.c
55186@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55187 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55188 dev->state = USB_STATE_ATTACHED;
55189 dev->lpm_disable_count = 1;
55190- atomic_set(&dev->urbnum, 0);
55191+ atomic_set_unchecked(&dev->urbnum, 0);
55192
55193 INIT_LIST_HEAD(&dev->ep0.urb_list);
55194 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55195diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
55196index 490a6ca..1f8364d 100644
55197--- a/drivers/usb/dwc3/gadget.c
55198+++ b/drivers/usb/dwc3/gadget.c
55199@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
55200 if (!usb_endpoint_xfer_isoc(desc))
55201 return 0;
55202
55203- memset(&trb_link, 0, sizeof(trb_link));
55204-
55205 /* Link TRB for ISOC. The HWO bit is never reset */
55206 trb_st_hw = &dep->trb_pool[0];
55207
55208diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55209index 8cfc319..4868255 100644
55210--- a/drivers/usb/early/ehci-dbgp.c
55211+++ b/drivers/usb/early/ehci-dbgp.c
55212@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55213
55214 #ifdef CONFIG_KGDB
55215 static struct kgdb_io kgdbdbgp_io_ops;
55216-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55217+static struct kgdb_io kgdbdbgp_io_ops_console;
55218+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55219 #else
55220 #define dbgp_kgdb_mode (0)
55221 #endif
55222@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55223 .write_char = kgdbdbgp_write_char,
55224 };
55225
55226+static struct kgdb_io kgdbdbgp_io_ops_console = {
55227+ .name = "kgdbdbgp",
55228+ .read_char = kgdbdbgp_read_char,
55229+ .write_char = kgdbdbgp_write_char,
55230+ .is_console = 1
55231+};
55232+
55233 static int kgdbdbgp_wait_time;
55234
55235 static int __init kgdbdbgp_parse_config(char *str)
55236@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55237 ptr++;
55238 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55239 }
55240- kgdb_register_io_module(&kgdbdbgp_io_ops);
55241- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55242+ if (early_dbgp_console.index != -1)
55243+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55244+ else
55245+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55246
55247 return 0;
55248 }
55249diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55250index 2b4c82d..06a8ee6 100644
55251--- a/drivers/usb/gadget/function/f_uac1.c
55252+++ b/drivers/usb/gadget/function/f_uac1.c
55253@@ -13,6 +13,7 @@
55254 #include <linux/kernel.h>
55255 #include <linux/device.h>
55256 #include <linux/atomic.h>
55257+#include <linux/module.h>
55258
55259 #include "u_uac1.h"
55260
55261diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55262index ad0aca8..8ff84865 100644
55263--- a/drivers/usb/gadget/function/u_serial.c
55264+++ b/drivers/usb/gadget/function/u_serial.c
55265@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55266 spin_lock_irq(&port->port_lock);
55267
55268 /* already open? Great. */
55269- if (port->port.count) {
55270+ if (atomic_read(&port->port.count)) {
55271 status = 0;
55272- port->port.count++;
55273+ atomic_inc(&port->port.count);
55274
55275 /* currently opening/closing? wait ... */
55276 } else if (port->openclose) {
55277@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55278 tty->driver_data = port;
55279 port->port.tty = tty;
55280
55281- port->port.count = 1;
55282+ atomic_set(&port->port.count, 1);
55283 port->openclose = false;
55284
55285 /* if connected, start the I/O stream */
55286@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55287
55288 spin_lock_irq(&port->port_lock);
55289
55290- if (port->port.count != 1) {
55291- if (port->port.count == 0)
55292+ if (atomic_read(&port->port.count) != 1) {
55293+ if (atomic_read(&port->port.count) == 0)
55294 WARN_ON(1);
55295 else
55296- --port->port.count;
55297+ atomic_dec(&port->port.count);
55298 goto exit;
55299 }
55300
55301@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55302 * and sleep if necessary
55303 */
55304 port->openclose = true;
55305- port->port.count = 0;
55306+ atomic_set(&port->port.count, 0);
55307
55308 gser = port->port_usb;
55309 if (gser && gser->disconnect)
55310@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
55311 int cond;
55312
55313 spin_lock_irq(&port->port_lock);
55314- cond = (port->port.count == 0) && !port->openclose;
55315+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55316 spin_unlock_irq(&port->port_lock);
55317 return cond;
55318 }
55319@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55320 /* if it's already open, start I/O ... and notify the serial
55321 * protocol about open/close status (connect/disconnect).
55322 */
55323- if (port->port.count) {
55324+ if (atomic_read(&port->port.count)) {
55325 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55326 gs_start_io(port);
55327 if (gser->connect)
55328@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
55329
55330 port->port_usb = NULL;
55331 gser->ioport = NULL;
55332- if (port->port.count > 0 || port->openclose) {
55333+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55334 wake_up_interruptible(&port->drain_wait);
55335 if (port->port.tty)
55336 tty_hangup(port->port.tty);
55337@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
55338
55339 /* finally, free any unused/unusable I/O buffers */
55340 spin_lock_irqsave(&port->port_lock, flags);
55341- if (port->port.count == 0 && !port->openclose)
55342+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55343 gs_buf_free(&port->port_write_buf);
55344 gs_free_requests(gser->out, &port->read_pool, NULL);
55345 gs_free_requests(gser->out, &port->read_queue, NULL);
55346diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55347index 7a55fea..cc0ed4f 100644
55348--- a/drivers/usb/gadget/function/u_uac1.c
55349+++ b/drivers/usb/gadget/function/u_uac1.c
55350@@ -16,6 +16,7 @@
55351 #include <linux/ctype.h>
55352 #include <linux/random.h>
55353 #include <linux/syscalls.h>
55354+#include <linux/module.h>
55355
55356 #include "u_uac1.h"
55357
55358diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55359index 6130b75..3b60008 100644
55360--- a/drivers/usb/host/ehci-hub.c
55361+++ b/drivers/usb/host/ehci-hub.c
55362@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
55363 urb->transfer_flags = URB_DIR_IN;
55364 usb_get_urb(urb);
55365 atomic_inc(&urb->use_count);
55366- atomic_inc(&urb->dev->urbnum);
55367+ atomic_inc_unchecked(&urb->dev->urbnum);
55368 urb->setup_dma = dma_map_single(
55369 hcd->self.controller,
55370 urb->setup_packet,
55371@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55372 urb->status = -EINPROGRESS;
55373 usb_get_urb(urb);
55374 atomic_inc(&urb->use_count);
55375- atomic_inc(&urb->dev->urbnum);
55376+ atomic_inc_unchecked(&urb->dev->urbnum);
55377 retval = submit_single_step_set_feature(hcd, urb, 0);
55378 if (!retval && !wait_for_completion_timeout(&done,
55379 msecs_to_jiffies(2000))) {
55380diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55381index d0d8fad..668ef7b 100644
55382--- a/drivers/usb/host/hwa-hc.c
55383+++ b/drivers/usb/host/hwa-hc.c
55384@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55385 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55386 struct wahc *wa = &hwahc->wa;
55387 struct device *dev = &wa->usb_iface->dev;
55388- u8 mas_le[UWB_NUM_MAS/8];
55389+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55390+
55391+ if (mas_le == NULL)
55392+ return -ENOMEM;
55393
55394 /* Set the stream index */
55395 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55396@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55397 WUSB_REQ_SET_WUSB_MAS,
55398 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55399 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55400- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55401+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55402 if (result < 0)
55403 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55404 out:
55405+ kfree(mas_le);
55406+
55407 return result;
55408 }
55409
55410diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55411index b3d245e..99549ed 100644
55412--- a/drivers/usb/misc/appledisplay.c
55413+++ b/drivers/usb/misc/appledisplay.c
55414@@ -84,7 +84,7 @@ struct appledisplay {
55415 struct mutex sysfslock; /* concurrent read and write */
55416 };
55417
55418-static atomic_t count_displays = ATOMIC_INIT(0);
55419+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55420 static struct workqueue_struct *wq;
55421
55422 static void appledisplay_complete(struct urb *urb)
55423@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55424
55425 /* Register backlight device */
55426 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55427- atomic_inc_return(&count_displays) - 1);
55428+ atomic_inc_return_unchecked(&count_displays) - 1);
55429 memset(&props, 0, sizeof(struct backlight_properties));
55430 props.type = BACKLIGHT_RAW;
55431 props.max_brightness = 0xff;
55432diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55433index 8d7fc48..01c4986 100644
55434--- a/drivers/usb/serial/console.c
55435+++ b/drivers/usb/serial/console.c
55436@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
55437
55438 info->port = port;
55439
55440- ++port->port.count;
55441+ atomic_inc(&port->port.count);
55442 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55443 if (serial->type->set_termios) {
55444 /*
55445@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
55446 }
55447 /* Now that any required fake tty operations are completed restore
55448 * the tty port count */
55449- --port->port.count;
55450+ atomic_dec(&port->port.count);
55451 /* The console is special in terms of closing the device so
55452 * indicate this port is now acting as a system console. */
55453 port->port.console = 1;
55454@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
55455 free_tty:
55456 kfree(tty);
55457 reset_open_count:
55458- port->port.count = 0;
55459+ atomic_set(&port->port.count, 0);
55460 usb_autopm_put_interface(serial->interface);
55461 error_get_interface:
55462 usb_serial_put(serial);
55463@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
55464 static void usb_console_write(struct console *co,
55465 const char *buf, unsigned count)
55466 {
55467- static struct usbcons_info *info = &usbcons_info;
55468+ struct usbcons_info *info = &usbcons_info;
55469 struct usb_serial_port *port = info->port;
55470 struct usb_serial *serial;
55471 int retval = -ENODEV;
55472diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55473index 307e339..6aa97cb 100644
55474--- a/drivers/usb/storage/usb.h
55475+++ b/drivers/usb/storage/usb.h
55476@@ -63,7 +63,7 @@ struct us_unusual_dev {
55477 __u8 useProtocol;
55478 __u8 useTransport;
55479 int (*initFunction)(struct us_data *);
55480-};
55481+} __do_const;
55482
55483
55484 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55485diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55486index a863a98..d272795 100644
55487--- a/drivers/usb/usbip/vhci.h
55488+++ b/drivers/usb/usbip/vhci.h
55489@@ -83,7 +83,7 @@ struct vhci_hcd {
55490 unsigned resuming:1;
55491 unsigned long re_timeout;
55492
55493- atomic_t seqnum;
55494+ atomic_unchecked_t seqnum;
55495
55496 /*
55497 * NOTE:
55498diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55499index c02374b..32d47a9 100644
55500--- a/drivers/usb/usbip/vhci_hcd.c
55501+++ b/drivers/usb/usbip/vhci_hcd.c
55502@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
55503
55504 spin_lock(&vdev->priv_lock);
55505
55506- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55507+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55508 if (priv->seqnum == 0xffff)
55509 dev_info(&urb->dev->dev, "seqnum max\n");
55510
55511@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55512 return -ENOMEM;
55513 }
55514
55515- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55516+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55517 if (unlink->seqnum == 0xffff)
55518 pr_info("seqnum max\n");
55519
55520@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
55521 vdev->rhport = rhport;
55522 }
55523
55524- atomic_set(&vhci->seqnum, 0);
55525+ atomic_set_unchecked(&vhci->seqnum, 0);
55526 spin_lock_init(&vhci->lock);
55527
55528 hcd->power_budget = 0; /* no limit */
55529diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55530index 00e4a54..d676f85 100644
55531--- a/drivers/usb/usbip/vhci_rx.c
55532+++ b/drivers/usb/usbip/vhci_rx.c
55533@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55534 if (!urb) {
55535 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55536 pr_info("max seqnum %d\n",
55537- atomic_read(&the_controller->seqnum));
55538+ atomic_read_unchecked(&the_controller->seqnum));
55539 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55540 return;
55541 }
55542diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55543index f2a8d29..7bc3fe7 100644
55544--- a/drivers/usb/wusbcore/wa-hc.h
55545+++ b/drivers/usb/wusbcore/wa-hc.h
55546@@ -240,7 +240,7 @@ struct wahc {
55547 spinlock_t xfer_list_lock;
55548 struct work_struct xfer_enqueue_work;
55549 struct work_struct xfer_error_work;
55550- atomic_t xfer_id_count;
55551+ atomic_unchecked_t xfer_id_count;
55552
55553 kernel_ulong_t quirks;
55554 };
55555@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55556 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55557 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55558 wa->dto_in_use = 0;
55559- atomic_set(&wa->xfer_id_count, 1);
55560+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55561 /* init the buf in URBs */
55562 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55563 usb_init_urb(&(wa->buf_in_urbs[index]));
55564diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55565index e279015..c2d0dae 100644
55566--- a/drivers/usb/wusbcore/wa-xfer.c
55567+++ b/drivers/usb/wusbcore/wa-xfer.c
55568@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55569 */
55570 static void wa_xfer_id_init(struct wa_xfer *xfer)
55571 {
55572- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55573+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55574 }
55575
55576 /* Return the xfer's ID. */
55577diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55578index f018d8d..ccab63f 100644
55579--- a/drivers/vfio/vfio.c
55580+++ b/drivers/vfio/vfio.c
55581@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55582 return 0;
55583
55584 /* TODO Prevent device auto probing */
55585- WARN("Device %s added to live group %d!\n", dev_name(dev),
55586+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55587 iommu_group_id(group->iommu_group));
55588
55589 return 0;
55590diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55591index 5174eba..451e6bc 100644
55592--- a/drivers/vhost/vringh.c
55593+++ b/drivers/vhost/vringh.c
55594@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55595 /* Userspace access helpers: in this case, addresses are really userspace. */
55596 static inline int getu16_user(u16 *val, const u16 *p)
55597 {
55598- return get_user(*val, (__force u16 __user *)p);
55599+ return get_user(*val, (u16 __force_user *)p);
55600 }
55601
55602 static inline int putu16_user(u16 *p, u16 val)
55603 {
55604- return put_user(val, (__force u16 __user *)p);
55605+ return put_user(val, (u16 __force_user *)p);
55606 }
55607
55608 static inline int copydesc_user(void *dst, const void *src, size_t len)
55609 {
55610- return copy_from_user(dst, (__force void __user *)src, len) ?
55611+ return copy_from_user(dst, (void __force_user *)src, len) ?
55612 -EFAULT : 0;
55613 }
55614
55615@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55616 const struct vring_used_elem *src,
55617 unsigned int num)
55618 {
55619- return copy_to_user((__force void __user *)dst, src,
55620+ return copy_to_user((void __force_user *)dst, src,
55621 sizeof(*dst) * num) ? -EFAULT : 0;
55622 }
55623
55624 static inline int xfer_from_user(void *src, void *dst, size_t len)
55625 {
55626- return copy_from_user(dst, (__force void __user *)src, len) ?
55627+ return copy_from_user(dst, (void __force_user *)src, len) ?
55628 -EFAULT : 0;
55629 }
55630
55631 static inline int xfer_to_user(void *dst, void *src, size_t len)
55632 {
55633- return copy_to_user((__force void __user *)dst, src, len) ?
55634+ return copy_to_user((void __force_user *)dst, src, len) ?
55635 -EFAULT : 0;
55636 }
55637
55638@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
55639 vrh->last_used_idx = 0;
55640 vrh->vring.num = num;
55641 /* vring expects kernel addresses, but only used via accessors. */
55642- vrh->vring.desc = (__force struct vring_desc *)desc;
55643- vrh->vring.avail = (__force struct vring_avail *)avail;
55644- vrh->vring.used = (__force struct vring_used *)used;
55645+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55646+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55647+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55648 return 0;
55649 }
55650 EXPORT_SYMBOL(vringh_init_user);
55651@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
55652
55653 static inline int putu16_kern(u16 *p, u16 val)
55654 {
55655- ACCESS_ONCE(*p) = val;
55656+ ACCESS_ONCE_RW(*p) = val;
55657 return 0;
55658 }
55659
55660diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55661index 84a110a..96312c3 100644
55662--- a/drivers/video/backlight/kb3886_bl.c
55663+++ b/drivers/video/backlight/kb3886_bl.c
55664@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55665 static unsigned long kb3886bl_flags;
55666 #define KB3886BL_SUSPENDED 0x01
55667
55668-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55669+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55670 {
55671 .ident = "Sahara Touch-iT",
55672 .matches = {
55673diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55674index 1b0b233..6f34c2c 100644
55675--- a/drivers/video/fbdev/arcfb.c
55676+++ b/drivers/video/fbdev/arcfb.c
55677@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55678 return -ENOSPC;
55679
55680 err = 0;
55681- if ((count + p) > fbmemlength) {
55682+ if (count > (fbmemlength - p)) {
55683 count = fbmemlength - p;
55684 err = -ENOSPC;
55685 }
55686diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55687index ff60701..814b973 100644
55688--- a/drivers/video/fbdev/aty/aty128fb.c
55689+++ b/drivers/video/fbdev/aty/aty128fb.c
55690@@ -149,7 +149,7 @@ enum {
55691 };
55692
55693 /* Must match above enum */
55694-static char * const r128_family[] = {
55695+static const char * const r128_family[] = {
55696 "AGP",
55697 "PCI",
55698 "PRO AGP",
55699diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55700index 37ec09b..98f8862 100644
55701--- a/drivers/video/fbdev/aty/atyfb_base.c
55702+++ b/drivers/video/fbdev/aty/atyfb_base.c
55703@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55704 par->accel_flags = var->accel_flags; /* hack */
55705
55706 if (var->accel_flags) {
55707- info->fbops->fb_sync = atyfb_sync;
55708+ pax_open_kernel();
55709+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55710+ pax_close_kernel();
55711 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55712 } else {
55713- info->fbops->fb_sync = NULL;
55714+ pax_open_kernel();
55715+ *(void **)&info->fbops->fb_sync = NULL;
55716+ pax_close_kernel();
55717 info->flags |= FBINFO_HWACCEL_DISABLED;
55718 }
55719
55720diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55721index 2fa0317..4983f2a 100644
55722--- a/drivers/video/fbdev/aty/mach64_cursor.c
55723+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55724@@ -8,6 +8,7 @@
55725 #include "../core/fb_draw.h"
55726
55727 #include <asm/io.h>
55728+#include <asm/pgtable.h>
55729
55730 #ifdef __sparc__
55731 #include <asm/fbio.h>
55732@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55733 info->sprite.buf_align = 16; /* and 64 lines tall. */
55734 info->sprite.flags = FB_PIXMAP_IO;
55735
55736- info->fbops->fb_cursor = atyfb_cursor;
55737+ pax_open_kernel();
55738+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55739+ pax_close_kernel();
55740
55741 return 0;
55742 }
55743diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55744index 900aa4e..6d49418 100644
55745--- a/drivers/video/fbdev/core/fb_defio.c
55746+++ b/drivers/video/fbdev/core/fb_defio.c
55747@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
55748
55749 BUG_ON(!fbdefio);
55750 mutex_init(&fbdefio->lock);
55751- info->fbops->fb_mmap = fb_deferred_io_mmap;
55752+ pax_open_kernel();
55753+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55754+ pax_close_kernel();
55755 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55756 INIT_LIST_HEAD(&fbdefio->pagelist);
55757 if (fbdefio->delay == 0) /* set a default of 1 s */
55758@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55759 page->mapping = NULL;
55760 }
55761
55762- info->fbops->fb_mmap = NULL;
55763+ *(void **)&info->fbops->fb_mmap = NULL;
55764 mutex_destroy(&fbdefio->lock);
55765 }
55766 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55767diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55768index b5e85f6..290f8c7 100644
55769--- a/drivers/video/fbdev/core/fbmem.c
55770+++ b/drivers/video/fbdev/core/fbmem.c
55771@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55772 __u32 data;
55773 int err;
55774
55775- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55776+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55777
55778 data = (__u32) (unsigned long) fix->smem_start;
55779 err |= put_user(data, &fix32->smem_start);
55780diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55781index 4254336..282567e 100644
55782--- a/drivers/video/fbdev/hyperv_fb.c
55783+++ b/drivers/video/fbdev/hyperv_fb.c
55784@@ -240,7 +240,7 @@ static uint screen_fb_size;
55785 static inline int synthvid_send(struct hv_device *hdev,
55786 struct synthvid_msg *msg)
55787 {
55788- static atomic64_t request_id = ATOMIC64_INIT(0);
55789+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55790 int ret;
55791
55792 msg->pipe_hdr.type = PIPE_MSG_DATA;
55793@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55794
55795 ret = vmbus_sendpacket(hdev->channel, msg,
55796 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55797- atomic64_inc_return(&request_id),
55798+ atomic64_inc_return_unchecked(&request_id),
55799 VM_PKT_DATA_INBAND, 0);
55800
55801 if (ret)
55802diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55803index 7672d2e..b56437f 100644
55804--- a/drivers/video/fbdev/i810/i810_accel.c
55805+++ b/drivers/video/fbdev/i810/i810_accel.c
55806@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55807 }
55808 }
55809 printk("ringbuffer lockup!!!\n");
55810+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55811 i810_report_error(mmio);
55812 par->dev_flags |= LOCKUP;
55813 info->pixmap.scan_align = 1;
55814diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55815index a01147f..5d896f8 100644
55816--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55817+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55818@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55819
55820 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55821 struct matrox_switch matrox_mystique = {
55822- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55823+ .preinit = MGA1064_preinit,
55824+ .reset = MGA1064_reset,
55825+ .init = MGA1064_init,
55826+ .restore = MGA1064_restore,
55827 };
55828 EXPORT_SYMBOL(matrox_mystique);
55829 #endif
55830
55831 #ifdef CONFIG_FB_MATROX_G
55832 struct matrox_switch matrox_G100 = {
55833- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55834+ .preinit = MGAG100_preinit,
55835+ .reset = MGAG100_reset,
55836+ .init = MGAG100_init,
55837+ .restore = MGAG100_restore,
55838 };
55839 EXPORT_SYMBOL(matrox_G100);
55840 #endif
55841diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55842index 195ad7c..09743fc 100644
55843--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55844+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55845@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55846 }
55847
55848 struct matrox_switch matrox_millennium = {
55849- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55850+ .preinit = Ti3026_preinit,
55851+ .reset = Ti3026_reset,
55852+ .init = Ti3026_init,
55853+ .restore = Ti3026_restore
55854 };
55855 EXPORT_SYMBOL(matrox_millennium);
55856 #endif
55857diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55858index fe92eed..106e085 100644
55859--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55860+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55861@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55862 struct mb862xxfb_par *par = info->par;
55863
55864 if (info->var.bits_per_pixel == 32) {
55865- info->fbops->fb_fillrect = cfb_fillrect;
55866- info->fbops->fb_copyarea = cfb_copyarea;
55867- info->fbops->fb_imageblit = cfb_imageblit;
55868+ pax_open_kernel();
55869+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55870+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55871+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55872+ pax_close_kernel();
55873 } else {
55874 outreg(disp, GC_L0EM, 3);
55875- info->fbops->fb_fillrect = mb86290fb_fillrect;
55876- info->fbops->fb_copyarea = mb86290fb_copyarea;
55877- info->fbops->fb_imageblit = mb86290fb_imageblit;
55878+ pax_open_kernel();
55879+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55880+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55881+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55882+ pax_close_kernel();
55883 }
55884 outreg(draw, GDC_REG_DRAW_BASE, 0);
55885 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55886diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55887index def0412..fed6529 100644
55888--- a/drivers/video/fbdev/nvidia/nvidia.c
55889+++ b/drivers/video/fbdev/nvidia/nvidia.c
55890@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55891 info->fix.line_length = (info->var.xres_virtual *
55892 info->var.bits_per_pixel) >> 3;
55893 if (info->var.accel_flags) {
55894- info->fbops->fb_imageblit = nvidiafb_imageblit;
55895- info->fbops->fb_fillrect = nvidiafb_fillrect;
55896- info->fbops->fb_copyarea = nvidiafb_copyarea;
55897- info->fbops->fb_sync = nvidiafb_sync;
55898+ pax_open_kernel();
55899+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55900+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55901+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55902+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55903+ pax_close_kernel();
55904 info->pixmap.scan_align = 4;
55905 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55906 info->flags |= FBINFO_READS_FAST;
55907 NVResetGraphics(info);
55908 } else {
55909- info->fbops->fb_imageblit = cfb_imageblit;
55910- info->fbops->fb_fillrect = cfb_fillrect;
55911- info->fbops->fb_copyarea = cfb_copyarea;
55912- info->fbops->fb_sync = NULL;
55913+ pax_open_kernel();
55914+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55915+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55916+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55917+ *(void **)&info->fbops->fb_sync = NULL;
55918+ pax_close_kernel();
55919 info->pixmap.scan_align = 1;
55920 info->flags |= FBINFO_HWACCEL_DISABLED;
55921 info->flags &= ~FBINFO_READS_FAST;
55922@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55923 info->pixmap.size = 8 * 1024;
55924 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55925
55926- if (!hwcur)
55927- info->fbops->fb_cursor = NULL;
55928+ if (!hwcur) {
55929+ pax_open_kernel();
55930+ *(void **)&info->fbops->fb_cursor = NULL;
55931+ pax_close_kernel();
55932+ }
55933
55934 info->var.accel_flags = (!noaccel);
55935
55936diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55937index 2412a0d..294215b 100644
55938--- a/drivers/video/fbdev/omap2/dss/display.c
55939+++ b/drivers/video/fbdev/omap2/dss/display.c
55940@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55941 if (dssdev->name == NULL)
55942 dssdev->name = dssdev->alias;
55943
55944+ pax_open_kernel();
55945 if (drv && drv->get_resolution == NULL)
55946- drv->get_resolution = omapdss_default_get_resolution;
55947+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55948 if (drv && drv->get_recommended_bpp == NULL)
55949- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55950+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55951 if (drv && drv->get_timings == NULL)
55952- drv->get_timings = omapdss_default_get_timings;
55953+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55954+ pax_close_kernel();
55955
55956 mutex_lock(&panel_list_mutex);
55957 list_add_tail(&dssdev->panel_list, &panel_list);
55958diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55959index 83433cb..71e9b98 100644
55960--- a/drivers/video/fbdev/s1d13xxxfb.c
55961+++ b/drivers/video/fbdev/s1d13xxxfb.c
55962@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55963
55964 switch(prod_id) {
55965 case S1D13506_PROD_ID: /* activate acceleration */
55966- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55967- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55968+ pax_open_kernel();
55969+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55970+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55971+ pax_close_kernel();
55972 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55973 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55974 break;
55975diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55976index 2bcc84a..29dd1ea 100644
55977--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55978+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55979@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55980 }
55981
55982 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55983- lcdc_sys_write_index,
55984- lcdc_sys_write_data,
55985- lcdc_sys_read_data,
55986+ .write_index = lcdc_sys_write_index,
55987+ .write_data = lcdc_sys_write_data,
55988+ .read_data = lcdc_sys_read_data,
55989 };
55990
55991 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55992diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55993index d513ed6..90b0de9 100644
55994--- a/drivers/video/fbdev/smscufx.c
55995+++ b/drivers/video/fbdev/smscufx.c
55996@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55997 fb_deferred_io_cleanup(info);
55998 kfree(info->fbdefio);
55999 info->fbdefio = NULL;
56000- info->fbops->fb_mmap = ufx_ops_mmap;
56001+ pax_open_kernel();
56002+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56003+ pax_close_kernel();
56004 }
56005
56006 pr_debug("released /dev/fb%d user=%d count=%d",
56007diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56008index 77b890e..458e666 100644
56009--- a/drivers/video/fbdev/udlfb.c
56010+++ b/drivers/video/fbdev/udlfb.c
56011@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56012 dlfb_urb_completion(urb);
56013
56014 error:
56015- atomic_add(bytes_sent, &dev->bytes_sent);
56016- atomic_add(bytes_identical, &dev->bytes_identical);
56017- atomic_add(width*height*2, &dev->bytes_rendered);
56018+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56019+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56020+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56021 end_cycles = get_cycles();
56022- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56023+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56024 >> 10)), /* Kcycles */
56025 &dev->cpu_kcycles_used);
56026
56027@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56028 dlfb_urb_completion(urb);
56029
56030 error:
56031- atomic_add(bytes_sent, &dev->bytes_sent);
56032- atomic_add(bytes_identical, &dev->bytes_identical);
56033- atomic_add(bytes_rendered, &dev->bytes_rendered);
56034+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56035+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56036+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56037 end_cycles = get_cycles();
56038- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56039+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56040 >> 10)), /* Kcycles */
56041 &dev->cpu_kcycles_used);
56042 }
56043@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56044 fb_deferred_io_cleanup(info);
56045 kfree(info->fbdefio);
56046 info->fbdefio = NULL;
56047- info->fbops->fb_mmap = dlfb_ops_mmap;
56048+ pax_open_kernel();
56049+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56050+ pax_close_kernel();
56051 }
56052
56053 pr_warn("released /dev/fb%d user=%d count=%d\n",
56054@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56055 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56056 struct dlfb_data *dev = fb_info->par;
56057 return snprintf(buf, PAGE_SIZE, "%u\n",
56058- atomic_read(&dev->bytes_rendered));
56059+ atomic_read_unchecked(&dev->bytes_rendered));
56060 }
56061
56062 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56063@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56064 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56065 struct dlfb_data *dev = fb_info->par;
56066 return snprintf(buf, PAGE_SIZE, "%u\n",
56067- atomic_read(&dev->bytes_identical));
56068+ atomic_read_unchecked(&dev->bytes_identical));
56069 }
56070
56071 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56072@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56073 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56074 struct dlfb_data *dev = fb_info->par;
56075 return snprintf(buf, PAGE_SIZE, "%u\n",
56076- atomic_read(&dev->bytes_sent));
56077+ atomic_read_unchecked(&dev->bytes_sent));
56078 }
56079
56080 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56081@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56082 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56083 struct dlfb_data *dev = fb_info->par;
56084 return snprintf(buf, PAGE_SIZE, "%u\n",
56085- atomic_read(&dev->cpu_kcycles_used));
56086+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56087 }
56088
56089 static ssize_t edid_show(
56090@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56091 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56092 struct dlfb_data *dev = fb_info->par;
56093
56094- atomic_set(&dev->bytes_rendered, 0);
56095- atomic_set(&dev->bytes_identical, 0);
56096- atomic_set(&dev->bytes_sent, 0);
56097- atomic_set(&dev->cpu_kcycles_used, 0);
56098+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56099+ atomic_set_unchecked(&dev->bytes_identical, 0);
56100+ atomic_set_unchecked(&dev->bytes_sent, 0);
56101+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56102
56103 return count;
56104 }
56105diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56106index 509d452..7c9d2de 100644
56107--- a/drivers/video/fbdev/uvesafb.c
56108+++ b/drivers/video/fbdev/uvesafb.c
56109@@ -19,6 +19,7 @@
56110 #include <linux/io.h>
56111 #include <linux/mutex.h>
56112 #include <linux/slab.h>
56113+#include <linux/moduleloader.h>
56114 #include <video/edid.h>
56115 #include <video/uvesafb.h>
56116 #ifdef CONFIG_X86
56117@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56118 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56119 par->pmi_setpal = par->ypan = 0;
56120 } else {
56121+
56122+#ifdef CONFIG_PAX_KERNEXEC
56123+#ifdef CONFIG_MODULES
56124+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56125+#endif
56126+ if (!par->pmi_code) {
56127+ par->pmi_setpal = par->ypan = 0;
56128+ return 0;
56129+ }
56130+#endif
56131+
56132 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56133 + task->t.regs.edi);
56134+
56135+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56136+ pax_open_kernel();
56137+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56138+ pax_close_kernel();
56139+
56140+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56141+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56142+#else
56143 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56144 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56145+#endif
56146+
56147 printk(KERN_INFO "uvesafb: protected mode interface info at "
56148 "%04x:%04x\n",
56149 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56150@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56151 par->ypan = ypan;
56152
56153 if (par->pmi_setpal || par->ypan) {
56154+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56155 if (__supported_pte_mask & _PAGE_NX) {
56156 par->pmi_setpal = par->ypan = 0;
56157 printk(KERN_WARNING "uvesafb: NX protection is active, "
56158 "better not use the PMI.\n");
56159- } else {
56160+ } else
56161+#endif
56162 uvesafb_vbe_getpmi(task, par);
56163- }
56164 }
56165 #else
56166 /* The protected mode interface is not available on non-x86. */
56167@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56168 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56169
56170 /* Disable blanking if the user requested so. */
56171- if (!blank)
56172- info->fbops->fb_blank = NULL;
56173+ if (!blank) {
56174+ pax_open_kernel();
56175+ *(void **)&info->fbops->fb_blank = NULL;
56176+ pax_close_kernel();
56177+ }
56178
56179 /*
56180 * Find out how much IO memory is required for the mode with
56181@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56182 info->flags = FBINFO_FLAG_DEFAULT |
56183 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56184
56185- if (!par->ypan)
56186- info->fbops->fb_pan_display = NULL;
56187+ if (!par->ypan) {
56188+ pax_open_kernel();
56189+ *(void **)&info->fbops->fb_pan_display = NULL;
56190+ pax_close_kernel();
56191+ }
56192 }
56193
56194 static void uvesafb_init_mtrr(struct fb_info *info)
56195@@ -1787,6 +1817,11 @@ out_mode:
56196 out:
56197 kfree(par->vbe_modes);
56198
56199+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56200+ if (par->pmi_code)
56201+ module_free_exec(NULL, par->pmi_code);
56202+#endif
56203+
56204 framebuffer_release(info);
56205 return err;
56206 }
56207@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
56208 kfree(par->vbe_state_orig);
56209 kfree(par->vbe_state_saved);
56210
56211+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56212+ if (par->pmi_code)
56213+ module_free_exec(NULL, par->pmi_code);
56214+#endif
56215+
56216 framebuffer_release(info);
56217 }
56218 return 0;
56219diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56220index 6170e7f..dd63031 100644
56221--- a/drivers/video/fbdev/vesafb.c
56222+++ b/drivers/video/fbdev/vesafb.c
56223@@ -9,6 +9,7 @@
56224 */
56225
56226 #include <linux/module.h>
56227+#include <linux/moduleloader.h>
56228 #include <linux/kernel.h>
56229 #include <linux/errno.h>
56230 #include <linux/string.h>
56231@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56232 static int vram_total; /* Set total amount of memory */
56233 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56234 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56235-static void (*pmi_start)(void) __read_mostly;
56236-static void (*pmi_pal) (void) __read_mostly;
56237+static void (*pmi_start)(void) __read_only;
56238+static void (*pmi_pal) (void) __read_only;
56239 static int depth __read_mostly;
56240 static int vga_compat __read_mostly;
56241 /* --------------------------------------------------------------------- */
56242@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56243 unsigned int size_remap;
56244 unsigned int size_total;
56245 char *option = NULL;
56246+ void *pmi_code = NULL;
56247
56248 /* ignore error return of fb_get_options */
56249 fb_get_options("vesafb", &option);
56250@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56251 size_remap = size_total;
56252 vesafb_fix.smem_len = size_remap;
56253
56254-#ifndef __i386__
56255- screen_info.vesapm_seg = 0;
56256-#endif
56257-
56258 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56259 printk(KERN_WARNING
56260 "vesafb: cannot reserve video memory at 0x%lx\n",
56261@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56262 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56263 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56264
56265+#ifdef __i386__
56266+
56267+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56268+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56269+ if (!pmi_code)
56270+#elif !defined(CONFIG_PAX_KERNEXEC)
56271+ if (0)
56272+#endif
56273+
56274+#endif
56275+ screen_info.vesapm_seg = 0;
56276+
56277 if (screen_info.vesapm_seg) {
56278- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56279- screen_info.vesapm_seg,screen_info.vesapm_off);
56280+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56281+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56282 }
56283
56284 if (screen_info.vesapm_seg < 0xc000)
56285@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56286
56287 if (ypan || pmi_setpal) {
56288 unsigned short *pmi_base;
56289+
56290 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56291- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56292- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56293+
56294+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56295+ pax_open_kernel();
56296+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56297+#else
56298+ pmi_code = pmi_base;
56299+#endif
56300+
56301+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56302+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56303+
56304+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56305+ pmi_start = ktva_ktla(pmi_start);
56306+ pmi_pal = ktva_ktla(pmi_pal);
56307+ pax_close_kernel();
56308+#endif
56309+
56310 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56311 if (pmi_base[3]) {
56312 printk(KERN_INFO "vesafb: pmi: ports = ");
56313@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56314 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56315 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56316
56317- if (!ypan)
56318- info->fbops->fb_pan_display = NULL;
56319+ if (!ypan) {
56320+ pax_open_kernel();
56321+ *(void **)&info->fbops->fb_pan_display = NULL;
56322+ pax_close_kernel();
56323+ }
56324
56325 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56326 err = -ENOMEM;
56327@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56328 fb_info(info, "%s frame buffer device\n", info->fix.id);
56329 return 0;
56330 err:
56331+
56332+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56333+ module_free_exec(NULL, pmi_code);
56334+#endif
56335+
56336 if (info->screen_base)
56337 iounmap(info->screen_base);
56338 framebuffer_release(info);
56339diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56340index 88714ae..16c2e11 100644
56341--- a/drivers/video/fbdev/via/via_clock.h
56342+++ b/drivers/video/fbdev/via/via_clock.h
56343@@ -56,7 +56,7 @@ struct via_clock {
56344
56345 void (*set_engine_pll_state)(u8 state);
56346 void (*set_engine_pll)(struct via_pll_config config);
56347-};
56348+} __no_const;
56349
56350
56351 static inline u32 get_pll_internal_frequency(u32 ref_freq,
56352diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56353index 3c14e43..2630570 100644
56354--- a/drivers/video/logo/logo_linux_clut224.ppm
56355+++ b/drivers/video/logo/logo_linux_clut224.ppm
56356@@ -2,1603 +2,1123 @@ P3
56357 # Standard 224-color Linux logo
56358 80 80
56359 255
56360- 0 0 0 0 0 0 0 0 0 0 0 0
56361- 0 0 0 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 0 0 0 0 0 0 0 0 0 0 0 0
56368- 0 0 0 0 0 0 0 0 0 0 0 0
56369- 6 6 6 6 6 6 10 10 10 10 10 10
56370- 10 10 10 6 6 6 6 6 6 6 6 6
56371- 0 0 0 0 0 0 0 0 0 0 0 0
56372- 0 0 0 0 0 0 0 0 0 0 0 0
56373- 0 0 0 0 0 0 0 0 0 0 0 0
56374- 0 0 0 0 0 0 0 0 0 0 0 0
56375- 0 0 0 0 0 0 0 0 0 0 0 0
56376- 0 0 0 0 0 0 0 0 0 0 0 0
56377- 0 0 0 0 0 0 0 0 0 0 0 0
56378- 0 0 0 0 0 0 0 0 0 0 0 0
56379- 0 0 0 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 0 0 0
56381- 0 0 0 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 0 0 0 0 0 0 0 0 0
56387- 0 0 0 0 0 0 0 0 0 0 0 0
56388- 0 0 0 6 6 6 10 10 10 14 14 14
56389- 22 22 22 26 26 26 30 30 30 34 34 34
56390- 30 30 30 30 30 30 26 26 26 18 18 18
56391- 14 14 14 10 10 10 6 6 6 0 0 0
56392- 0 0 0 0 0 0 0 0 0 0 0 0
56393- 0 0 0 0 0 0 0 0 0 0 0 0
56394- 0 0 0 0 0 0 0 0 0 0 0 0
56395- 0 0 0 0 0 0 0 0 0 0 0 0
56396- 0 0 0 0 0 0 0 0 0 0 0 0
56397- 0 0 0 0 0 0 0 0 0 0 0 0
56398- 0 0 0 0 0 0 0 0 0 0 0 0
56399- 0 0 0 0 0 0 0 0 0 0 0 0
56400- 0 0 0 0 0 0 0 0 0 0 0 0
56401- 0 0 0 0 0 1 0 0 1 0 0 0
56402- 0 0 0 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 0 0 0 0 0 0 0 0 0 0 0 0
56407- 0 0 0 0 0 0 0 0 0 0 0 0
56408- 6 6 6 14 14 14 26 26 26 42 42 42
56409- 54 54 54 66 66 66 78 78 78 78 78 78
56410- 78 78 78 74 74 74 66 66 66 54 54 54
56411- 42 42 42 26 26 26 18 18 18 10 10 10
56412- 6 6 6 0 0 0 0 0 0 0 0 0
56413- 0 0 0 0 0 0 0 0 0 0 0 0
56414- 0 0 0 0 0 0 0 0 0 0 0 0
56415- 0 0 0 0 0 0 0 0 0 0 0 0
56416- 0 0 0 0 0 0 0 0 0 0 0 0
56417- 0 0 0 0 0 0 0 0 0 0 0 0
56418- 0 0 0 0 0 0 0 0 0 0 0 0
56419- 0 0 0 0 0 0 0 0 0 0 0 0
56420- 0 0 0 0 0 0 0 0 0 0 0 0
56421- 0 0 1 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 0 0 0
56426- 0 0 0 0 0 0 0 0 0 0 0 0
56427- 0 0 0 0 0 0 0 0 0 10 10 10
56428- 22 22 22 42 42 42 66 66 66 86 86 86
56429- 66 66 66 38 38 38 38 38 38 22 22 22
56430- 26 26 26 34 34 34 54 54 54 66 66 66
56431- 86 86 86 70 70 70 46 46 46 26 26 26
56432- 14 14 14 6 6 6 0 0 0 0 0 0
56433- 0 0 0 0 0 0 0 0 0 0 0 0
56434- 0 0 0 0 0 0 0 0 0 0 0 0
56435- 0 0 0 0 0 0 0 0 0 0 0 0
56436- 0 0 0 0 0 0 0 0 0 0 0 0
56437- 0 0 0 0 0 0 0 0 0 0 0 0
56438- 0 0 0 0 0 0 0 0 0 0 0 0
56439- 0 0 0 0 0 0 0 0 0 0 0 0
56440- 0 0 0 0 0 0 0 0 0 0 0 0
56441- 0 0 1 0 0 1 0 0 1 0 0 0
56442- 0 0 0 0 0 0 0 0 0 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 0 0 0
56446- 0 0 0 0 0 0 0 0 0 0 0 0
56447- 0 0 0 0 0 0 10 10 10 26 26 26
56448- 50 50 50 82 82 82 58 58 58 6 6 6
56449- 2 2 6 2 2 6 2 2 6 2 2 6
56450- 2 2 6 2 2 6 2 2 6 2 2 6
56451- 6 6 6 54 54 54 86 86 86 66 66 66
56452- 38 38 38 18 18 18 6 6 6 0 0 0
56453- 0 0 0 0 0 0 0 0 0 0 0 0
56454- 0 0 0 0 0 0 0 0 0 0 0 0
56455- 0 0 0 0 0 0 0 0 0 0 0 0
56456- 0 0 0 0 0 0 0 0 0 0 0 0
56457- 0 0 0 0 0 0 0 0 0 0 0 0
56458- 0 0 0 0 0 0 0 0 0 0 0 0
56459- 0 0 0 0 0 0 0 0 0 0 0 0
56460- 0 0 0 0 0 0 0 0 0 0 0 0
56461- 0 0 0 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 0 0 0
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 0 0 0
56466- 0 0 0 0 0 0 0 0 0 0 0 0
56467- 0 0 0 6 6 6 22 22 22 50 50 50
56468- 78 78 78 34 34 34 2 2 6 2 2 6
56469- 2 2 6 2 2 6 2 2 6 2 2 6
56470- 2 2 6 2 2 6 2 2 6 2 2 6
56471- 2 2 6 2 2 6 6 6 6 70 70 70
56472- 78 78 78 46 46 46 22 22 22 6 6 6
56473- 0 0 0 0 0 0 0 0 0 0 0 0
56474- 0 0 0 0 0 0 0 0 0 0 0 0
56475- 0 0 0 0 0 0 0 0 0 0 0 0
56476- 0 0 0 0 0 0 0 0 0 0 0 0
56477- 0 0 0 0 0 0 0 0 0 0 0 0
56478- 0 0 0 0 0 0 0 0 0 0 0 0
56479- 0 0 0 0 0 0 0 0 0 0 0 0
56480- 0 0 0 0 0 0 0 0 0 0 0 0
56481- 0 0 1 0 0 1 0 0 1 0 0 0
56482- 0 0 0 0 0 0 0 0 0 0 0 0
56483- 0 0 0 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 0 0 0
56486- 0 0 0 0 0 0 0 0 0 0 0 0
56487- 6 6 6 18 18 18 42 42 42 82 82 82
56488- 26 26 26 2 2 6 2 2 6 2 2 6
56489- 2 2 6 2 2 6 2 2 6 2 2 6
56490- 2 2 6 2 2 6 2 2 6 14 14 14
56491- 46 46 46 34 34 34 6 6 6 2 2 6
56492- 42 42 42 78 78 78 42 42 42 18 18 18
56493- 6 6 6 0 0 0 0 0 0 0 0 0
56494- 0 0 0 0 0 0 0 0 0 0 0 0
56495- 0 0 0 0 0 0 0 0 0 0 0 0
56496- 0 0 0 0 0 0 0 0 0 0 0 0
56497- 0 0 0 0 0 0 0 0 0 0 0 0
56498- 0 0 0 0 0 0 0 0 0 0 0 0
56499- 0 0 0 0 0 0 0 0 0 0 0 0
56500- 0 0 0 0 0 0 0 0 0 0 0 0
56501- 0 0 1 0 0 0 0 0 1 0 0 0
56502- 0 0 0 0 0 0 0 0 0 0 0 0
56503- 0 0 0 0 0 0 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 0 0 0
56506- 0 0 0 0 0 0 0 0 0 0 0 0
56507- 10 10 10 30 30 30 66 66 66 58 58 58
56508- 2 2 6 2 2 6 2 2 6 2 2 6
56509- 2 2 6 2 2 6 2 2 6 2 2 6
56510- 2 2 6 2 2 6 2 2 6 26 26 26
56511- 86 86 86 101 101 101 46 46 46 10 10 10
56512- 2 2 6 58 58 58 70 70 70 34 34 34
56513- 10 10 10 0 0 0 0 0 0 0 0 0
56514- 0 0 0 0 0 0 0 0 0 0 0 0
56515- 0 0 0 0 0 0 0 0 0 0 0 0
56516- 0 0 0 0 0 0 0 0 0 0 0 0
56517- 0 0 0 0 0 0 0 0 0 0 0 0
56518- 0 0 0 0 0 0 0 0 0 0 0 0
56519- 0 0 0 0 0 0 0 0 0 0 0 0
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 1 0 0 1 0 0 1 0 0 0
56522- 0 0 0 0 0 0 0 0 0 0 0 0
56523- 0 0 0 0 0 0 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 0 0 0 0 0 0 0 0 0 0 0 0
56527- 14 14 14 42 42 42 86 86 86 10 10 10
56528- 2 2 6 2 2 6 2 2 6 2 2 6
56529- 2 2 6 2 2 6 2 2 6 2 2 6
56530- 2 2 6 2 2 6 2 2 6 30 30 30
56531- 94 94 94 94 94 94 58 58 58 26 26 26
56532- 2 2 6 6 6 6 78 78 78 54 54 54
56533- 22 22 22 6 6 6 0 0 0 0 0 0
56534- 0 0 0 0 0 0 0 0 0 0 0 0
56535- 0 0 0 0 0 0 0 0 0 0 0 0
56536- 0 0 0 0 0 0 0 0 0 0 0 0
56537- 0 0 0 0 0 0 0 0 0 0 0 0
56538- 0 0 0 0 0 0 0 0 0 0 0 0
56539- 0 0 0 0 0 0 0 0 0 0 0 0
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 0 0 0 0 0 0
56543- 0 0 0 0 0 0 0 0 0 0 0 0
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 0 0 0
56546- 0 0 0 0 0 0 0 0 0 6 6 6
56547- 22 22 22 62 62 62 62 62 62 2 2 6
56548- 2 2 6 2 2 6 2 2 6 2 2 6
56549- 2 2 6 2 2 6 2 2 6 2 2 6
56550- 2 2 6 2 2 6 2 2 6 26 26 26
56551- 54 54 54 38 38 38 18 18 18 10 10 10
56552- 2 2 6 2 2 6 34 34 34 82 82 82
56553- 38 38 38 14 14 14 0 0 0 0 0 0
56554- 0 0 0 0 0 0 0 0 0 0 0 0
56555- 0 0 0 0 0 0 0 0 0 0 0 0
56556- 0 0 0 0 0 0 0 0 0 0 0 0
56557- 0 0 0 0 0 0 0 0 0 0 0 0
56558- 0 0 0 0 0 0 0 0 0 0 0 0
56559- 0 0 0 0 0 0 0 0 0 0 0 0
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 1 0 0 1 0 0 0
56562- 0 0 0 0 0 0 0 0 0 0 0 0
56563- 0 0 0 0 0 0 0 0 0 0 0 0
56564- 0 0 0 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 0 0 0
56566- 0 0 0 0 0 0 0 0 0 6 6 6
56567- 30 30 30 78 78 78 30 30 30 2 2 6
56568- 2 2 6 2 2 6 2 2 6 2 2 6
56569- 2 2 6 2 2 6 2 2 6 2 2 6
56570- 2 2 6 2 2 6 2 2 6 10 10 10
56571- 10 10 10 2 2 6 2 2 6 2 2 6
56572- 2 2 6 2 2 6 2 2 6 78 78 78
56573- 50 50 50 18 18 18 6 6 6 0 0 0
56574- 0 0 0 0 0 0 0 0 0 0 0 0
56575- 0 0 0 0 0 0 0 0 0 0 0 0
56576- 0 0 0 0 0 0 0 0 0 0 0 0
56577- 0 0 0 0 0 0 0 0 0 0 0 0
56578- 0 0 0 0 0 0 0 0 0 0 0 0
56579- 0 0 0 0 0 0 0 0 0 0 0 0
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 1 0 0 0 0 0 0 0 0 0
56582- 0 0 0 0 0 0 0 0 0 0 0 0
56583- 0 0 0 0 0 0 0 0 0 0 0 0
56584- 0 0 0 0 0 0 0 0 0 0 0 0
56585- 0 0 0 0 0 0 0 0 0 0 0 0
56586- 0 0 0 0 0 0 0 0 0 10 10 10
56587- 38 38 38 86 86 86 14 14 14 2 2 6
56588- 2 2 6 2 2 6 2 2 6 2 2 6
56589- 2 2 6 2 2 6 2 2 6 2 2 6
56590- 2 2 6 2 2 6 2 2 6 2 2 6
56591- 2 2 6 2 2 6 2 2 6 2 2 6
56592- 2 2 6 2 2 6 2 2 6 54 54 54
56593- 66 66 66 26 26 26 6 6 6 0 0 0
56594- 0 0 0 0 0 0 0 0 0 0 0 0
56595- 0 0 0 0 0 0 0 0 0 0 0 0
56596- 0 0 0 0 0 0 0 0 0 0 0 0
56597- 0 0 0 0 0 0 0 0 0 0 0 0
56598- 0 0 0 0 0 0 0 0 0 0 0 0
56599- 0 0 0 0 0 0 0 0 0 0 0 0
56600- 0 0 0 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 1 0 0 1 0 0 0
56602- 0 0 0 0 0 0 0 0 0 0 0 0
56603- 0 0 0 0 0 0 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 0 0 0
56605- 0 0 0 0 0 0 0 0 0 0 0 0
56606- 0 0 0 0 0 0 0 0 0 14 14 14
56607- 42 42 42 82 82 82 2 2 6 2 2 6
56608- 2 2 6 6 6 6 10 10 10 2 2 6
56609- 2 2 6 2 2 6 2 2 6 2 2 6
56610- 2 2 6 2 2 6 2 2 6 6 6 6
56611- 14 14 14 10 10 10 2 2 6 2 2 6
56612- 2 2 6 2 2 6 2 2 6 18 18 18
56613- 82 82 82 34 34 34 10 10 10 0 0 0
56614- 0 0 0 0 0 0 0 0 0 0 0 0
56615- 0 0 0 0 0 0 0 0 0 0 0 0
56616- 0 0 0 0 0 0 0 0 0 0 0 0
56617- 0 0 0 0 0 0 0 0 0 0 0 0
56618- 0 0 0 0 0 0 0 0 0 0 0 0
56619- 0 0 0 0 0 0 0 0 0 0 0 0
56620- 0 0 0 0 0 0 0 0 0 0 0 0
56621- 0 0 1 0 0 0 0 0 0 0 0 0
56622- 0 0 0 0 0 0 0 0 0 0 0 0
56623- 0 0 0 0 0 0 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 0 0 0
56625- 0 0 0 0 0 0 0 0 0 0 0 0
56626- 0 0 0 0 0 0 0 0 0 14 14 14
56627- 46 46 46 86 86 86 2 2 6 2 2 6
56628- 6 6 6 6 6 6 22 22 22 34 34 34
56629- 6 6 6 2 2 6 2 2 6 2 2 6
56630- 2 2 6 2 2 6 18 18 18 34 34 34
56631- 10 10 10 50 50 50 22 22 22 2 2 6
56632- 2 2 6 2 2 6 2 2 6 10 10 10
56633- 86 86 86 42 42 42 14 14 14 0 0 0
56634- 0 0 0 0 0 0 0 0 0 0 0 0
56635- 0 0 0 0 0 0 0 0 0 0 0 0
56636- 0 0 0 0 0 0 0 0 0 0 0 0
56637- 0 0 0 0 0 0 0 0 0 0 0 0
56638- 0 0 0 0 0 0 0 0 0 0 0 0
56639- 0 0 0 0 0 0 0 0 0 0 0 0
56640- 0 0 0 0 0 0 0 0 0 0 0 0
56641- 0 0 1 0 0 1 0 0 1 0 0 0
56642- 0 0 0 0 0 0 0 0 0 0 0 0
56643- 0 0 0 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 0 0 0
56645- 0 0 0 0 0 0 0 0 0 0 0 0
56646- 0 0 0 0 0 0 0 0 0 14 14 14
56647- 46 46 46 86 86 86 2 2 6 2 2 6
56648- 38 38 38 116 116 116 94 94 94 22 22 22
56649- 22 22 22 2 2 6 2 2 6 2 2 6
56650- 14 14 14 86 86 86 138 138 138 162 162 162
56651-154 154 154 38 38 38 26 26 26 6 6 6
56652- 2 2 6 2 2 6 2 2 6 2 2 6
56653- 86 86 86 46 46 46 14 14 14 0 0 0
56654- 0 0 0 0 0 0 0 0 0 0 0 0
56655- 0 0 0 0 0 0 0 0 0 0 0 0
56656- 0 0 0 0 0 0 0 0 0 0 0 0
56657- 0 0 0 0 0 0 0 0 0 0 0 0
56658- 0 0 0 0 0 0 0 0 0 0 0 0
56659- 0 0 0 0 0 0 0 0 0 0 0 0
56660- 0 0 0 0 0 0 0 0 0 0 0 0
56661- 0 0 0 0 0 0 0 0 0 0 0 0
56662- 0 0 0 0 0 0 0 0 0 0 0 0
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 0 0 0
56665- 0 0 0 0 0 0 0 0 0 0 0 0
56666- 0 0 0 0 0 0 0 0 0 14 14 14
56667- 46 46 46 86 86 86 2 2 6 14 14 14
56668-134 134 134 198 198 198 195 195 195 116 116 116
56669- 10 10 10 2 2 6 2 2 6 6 6 6
56670-101 98 89 187 187 187 210 210 210 218 218 218
56671-214 214 214 134 134 134 14 14 14 6 6 6
56672- 2 2 6 2 2 6 2 2 6 2 2 6
56673- 86 86 86 50 50 50 18 18 18 6 6 6
56674- 0 0 0 0 0 0 0 0 0 0 0 0
56675- 0 0 0 0 0 0 0 0 0 0 0 0
56676- 0 0 0 0 0 0 0 0 0 0 0 0
56677- 0 0 0 0 0 0 0 0 0 0 0 0
56678- 0 0 0 0 0 0 0 0 0 0 0 0
56679- 0 0 0 0 0 0 0 0 0 0 0 0
56680- 0 0 0 0 0 0 0 0 1 0 0 0
56681- 0 0 1 0 0 1 0 0 1 0 0 0
56682- 0 0 0 0 0 0 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 0 0 0
56686- 0 0 0 0 0 0 0 0 0 14 14 14
56687- 46 46 46 86 86 86 2 2 6 54 54 54
56688-218 218 218 195 195 195 226 226 226 246 246 246
56689- 58 58 58 2 2 6 2 2 6 30 30 30
56690-210 210 210 253 253 253 174 174 174 123 123 123
56691-221 221 221 234 234 234 74 74 74 2 2 6
56692- 2 2 6 2 2 6 2 2 6 2 2 6
56693- 70 70 70 58 58 58 22 22 22 6 6 6
56694- 0 0 0 0 0 0 0 0 0 0 0 0
56695- 0 0 0 0 0 0 0 0 0 0 0 0
56696- 0 0 0 0 0 0 0 0 0 0 0 0
56697- 0 0 0 0 0 0 0 0 0 0 0 0
56698- 0 0 0 0 0 0 0 0 0 0 0 0
56699- 0 0 0 0 0 0 0 0 0 0 0 0
56700- 0 0 0 0 0 0 0 0 0 0 0 0
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 0 0 0
56706- 0 0 0 0 0 0 0 0 0 14 14 14
56707- 46 46 46 82 82 82 2 2 6 106 106 106
56708-170 170 170 26 26 26 86 86 86 226 226 226
56709-123 123 123 10 10 10 14 14 14 46 46 46
56710-231 231 231 190 190 190 6 6 6 70 70 70
56711- 90 90 90 238 238 238 158 158 158 2 2 6
56712- 2 2 6 2 2 6 2 2 6 2 2 6
56713- 70 70 70 58 58 58 22 22 22 6 6 6
56714- 0 0 0 0 0 0 0 0 0 0 0 0
56715- 0 0 0 0 0 0 0 0 0 0 0 0
56716- 0 0 0 0 0 0 0 0 0 0 0 0
56717- 0 0 0 0 0 0 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 0 0 0 0
56719- 0 0 0 0 0 0 0 0 0 0 0 0
56720- 0 0 0 0 0 0 0 0 1 0 0 0
56721- 0 0 1 0 0 1 0 0 1 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 0 0 0
56726- 0 0 0 0 0 0 0 0 0 14 14 14
56727- 42 42 42 86 86 86 6 6 6 116 116 116
56728-106 106 106 6 6 6 70 70 70 149 149 149
56729-128 128 128 18 18 18 38 38 38 54 54 54
56730-221 221 221 106 106 106 2 2 6 14 14 14
56731- 46 46 46 190 190 190 198 198 198 2 2 6
56732- 2 2 6 2 2 6 2 2 6 2 2 6
56733- 74 74 74 62 62 62 22 22 22 6 6 6
56734- 0 0 0 0 0 0 0 0 0 0 0 0
56735- 0 0 0 0 0 0 0 0 0 0 0 0
56736- 0 0 0 0 0 0 0 0 0 0 0 0
56737- 0 0 0 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 0 0 0 0
56739- 0 0 0 0 0 0 0 0 0 0 0 0
56740- 0 0 0 0 0 0 0 0 1 0 0 0
56741- 0 0 1 0 0 0 0 0 1 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 0 0 0 0 0 0
56746- 0 0 0 0 0 0 0 0 0 14 14 14
56747- 42 42 42 94 94 94 14 14 14 101 101 101
56748-128 128 128 2 2 6 18 18 18 116 116 116
56749-118 98 46 121 92 8 121 92 8 98 78 10
56750-162 162 162 106 106 106 2 2 6 2 2 6
56751- 2 2 6 195 195 195 195 195 195 6 6 6
56752- 2 2 6 2 2 6 2 2 6 2 2 6
56753- 74 74 74 62 62 62 22 22 22 6 6 6
56754- 0 0 0 0 0 0 0 0 0 0 0 0
56755- 0 0 0 0 0 0 0 0 0 0 0 0
56756- 0 0 0 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 0 0 0 0
56759- 0 0 0 0 0 0 0 0 0 0 0 0
56760- 0 0 0 0 0 0 0 0 1 0 0 1
56761- 0 0 1 0 0 0 0 0 1 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 0 0 0 0 0 0
56766- 0 0 0 0 0 0 0 0 0 10 10 10
56767- 38 38 38 90 90 90 14 14 14 58 58 58
56768-210 210 210 26 26 26 54 38 6 154 114 10
56769-226 170 11 236 186 11 225 175 15 184 144 12
56770-215 174 15 175 146 61 37 26 9 2 2 6
56771- 70 70 70 246 246 246 138 138 138 2 2 6
56772- 2 2 6 2 2 6 2 2 6 2 2 6
56773- 70 70 70 66 66 66 26 26 26 6 6 6
56774- 0 0 0 0 0 0 0 0 0 0 0 0
56775- 0 0 0 0 0 0 0 0 0 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 0 0 0 0 0 0
56786- 0 0 0 0 0 0 0 0 0 10 10 10
56787- 38 38 38 86 86 86 14 14 14 10 10 10
56788-195 195 195 188 164 115 192 133 9 225 175 15
56789-239 182 13 234 190 10 232 195 16 232 200 30
56790-245 207 45 241 208 19 232 195 16 184 144 12
56791-218 194 134 211 206 186 42 42 42 2 2 6
56792- 2 2 6 2 2 6 2 2 6 2 2 6
56793- 50 50 50 74 74 74 30 30 30 6 6 6
56794- 0 0 0 0 0 0 0 0 0 0 0 0
56795- 0 0 0 0 0 0 0 0 0 0 0 0
56796- 0 0 0 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 0 0 0 0 0 0
56799- 0 0 0 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 0 0 0 0 0 0
56806- 0 0 0 0 0 0 0 0 0 10 10 10
56807- 34 34 34 86 86 86 14 14 14 2 2 6
56808-121 87 25 192 133 9 219 162 10 239 182 13
56809-236 186 11 232 195 16 241 208 19 244 214 54
56810-246 218 60 246 218 38 246 215 20 241 208 19
56811-241 208 19 226 184 13 121 87 25 2 2 6
56812- 2 2 6 2 2 6 2 2 6 2 2 6
56813- 50 50 50 82 82 82 34 34 34 10 10 10
56814- 0 0 0 0 0 0 0 0 0 0 0 0
56815- 0 0 0 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 0 0 0 0 0 0
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 0 0 0
56825- 0 0 0 0 0 0 0 0 0 0 0 0
56826- 0 0 0 0 0 0 0 0 0 10 10 10
56827- 34 34 34 82 82 82 30 30 30 61 42 6
56828-180 123 7 206 145 10 230 174 11 239 182 13
56829-234 190 10 238 202 15 241 208 19 246 218 74
56830-246 218 38 246 215 20 246 215 20 246 215 20
56831-226 184 13 215 174 15 184 144 12 6 6 6
56832- 2 2 6 2 2 6 2 2 6 2 2 6
56833- 26 26 26 94 94 94 42 42 42 14 14 14
56834- 0 0 0 0 0 0 0 0 0 0 0 0
56835- 0 0 0 0 0 0 0 0 0 0 0 0
56836- 0 0 0 0 0 0 0 0 0 0 0 0
56837- 0 0 0 0 0 0 0 0 0 0 0 0
56838- 0 0 0 0 0 0 0 0 0 0 0 0
56839- 0 0 0 0 0 0 0 0 0 0 0 0
56840- 0 0 0 0 0 0 0 0 0 0 0 0
56841- 0 0 0 0 0 0 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 0 0 0
56845- 0 0 0 0 0 0 0 0 0 0 0 0
56846- 0 0 0 0 0 0 0 0 0 10 10 10
56847- 30 30 30 78 78 78 50 50 50 104 69 6
56848-192 133 9 216 158 10 236 178 12 236 186 11
56849-232 195 16 241 208 19 244 214 54 245 215 43
56850-246 215 20 246 215 20 241 208 19 198 155 10
56851-200 144 11 216 158 10 156 118 10 2 2 6
56852- 2 2 6 2 2 6 2 2 6 2 2 6
56853- 6 6 6 90 90 90 54 54 54 18 18 18
56854- 6 6 6 0 0 0 0 0 0 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 0 0 0 0 0 0 0 0 0
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 0 0 0 0 0 0 0 0 0 0 0 0
56859- 0 0 0 0 0 0 0 0 0 0 0 0
56860- 0 0 0 0 0 0 0 0 0 0 0 0
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 0 0 0
56865- 0 0 0 0 0 0 0 0 0 0 0 0
56866- 0 0 0 0 0 0 0 0 0 10 10 10
56867- 30 30 30 78 78 78 46 46 46 22 22 22
56868-137 92 6 210 162 10 239 182 13 238 190 10
56869-238 202 15 241 208 19 246 215 20 246 215 20
56870-241 208 19 203 166 17 185 133 11 210 150 10
56871-216 158 10 210 150 10 102 78 10 2 2 6
56872- 6 6 6 54 54 54 14 14 14 2 2 6
56873- 2 2 6 62 62 62 74 74 74 30 30 30
56874- 10 10 10 0 0 0 0 0 0 0 0 0
56875- 0 0 0 0 0 0 0 0 0 0 0 0
56876- 0 0 0 0 0 0 0 0 0 0 0 0
56877- 0 0 0 0 0 0 0 0 0 0 0 0
56878- 0 0 0 0 0 0 0 0 0 0 0 0
56879- 0 0 0 0 0 0 0 0 0 0 0 0
56880- 0 0 0 0 0 0 0 0 0 0 0 0
56881- 0 0 0 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 0 0 0
56885- 0 0 0 0 0 0 0 0 0 0 0 0
56886- 0 0 0 0 0 0 0 0 0 10 10 10
56887- 34 34 34 78 78 78 50 50 50 6 6 6
56888- 94 70 30 139 102 15 190 146 13 226 184 13
56889-232 200 30 232 195 16 215 174 15 190 146 13
56890-168 122 10 192 133 9 210 150 10 213 154 11
56891-202 150 34 182 157 106 101 98 89 2 2 6
56892- 2 2 6 78 78 78 116 116 116 58 58 58
56893- 2 2 6 22 22 22 90 90 90 46 46 46
56894- 18 18 18 6 6 6 0 0 0 0 0 0
56895- 0 0 0 0 0 0 0 0 0 0 0 0
56896- 0 0 0 0 0 0 0 0 0 0 0 0
56897- 0 0 0 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 0 0 0
56899- 0 0 0 0 0 0 0 0 0 0 0 0
56900- 0 0 0 0 0 0 0 0 0 0 0 0
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 0 0 0
56905- 0 0 0 0 0 0 0 0 0 0 0 0
56906- 0 0 0 0 0 0 0 0 0 10 10 10
56907- 38 38 38 86 86 86 50 50 50 6 6 6
56908-128 128 128 174 154 114 156 107 11 168 122 10
56909-198 155 10 184 144 12 197 138 11 200 144 11
56910-206 145 10 206 145 10 197 138 11 188 164 115
56911-195 195 195 198 198 198 174 174 174 14 14 14
56912- 2 2 6 22 22 22 116 116 116 116 116 116
56913- 22 22 22 2 2 6 74 74 74 70 70 70
56914- 30 30 30 10 10 10 0 0 0 0 0 0
56915- 0 0 0 0 0 0 0 0 0 0 0 0
56916- 0 0 0 0 0 0 0 0 0 0 0 0
56917- 0 0 0 0 0 0 0 0 0 0 0 0
56918- 0 0 0 0 0 0 0 0 0 0 0 0
56919- 0 0 0 0 0 0 0 0 0 0 0 0
56920- 0 0 0 0 0 0 0 0 0 0 0 0
56921- 0 0 0 0 0 0 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 0 0 0 0 0 0
56925- 0 0 0 0 0 0 0 0 0 0 0 0
56926- 0 0 0 0 0 0 6 6 6 18 18 18
56927- 50 50 50 101 101 101 26 26 26 10 10 10
56928-138 138 138 190 190 190 174 154 114 156 107 11
56929-197 138 11 200 144 11 197 138 11 192 133 9
56930-180 123 7 190 142 34 190 178 144 187 187 187
56931-202 202 202 221 221 221 214 214 214 66 66 66
56932- 2 2 6 2 2 6 50 50 50 62 62 62
56933- 6 6 6 2 2 6 10 10 10 90 90 90
56934- 50 50 50 18 18 18 6 6 6 0 0 0
56935- 0 0 0 0 0 0 0 0 0 0 0 0
56936- 0 0 0 0 0 0 0 0 0 0 0 0
56937- 0 0 0 0 0 0 0 0 0 0 0 0
56938- 0 0 0 0 0 0 0 0 0 0 0 0
56939- 0 0 0 0 0 0 0 0 0 0 0 0
56940- 0 0 0 0 0 0 0 0 0 0 0 0
56941- 0 0 0 0 0 0 0 0 0 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 0 0 0 0 0 0
56945- 0 0 0 0 0 0 0 0 0 0 0 0
56946- 0 0 0 0 0 0 10 10 10 34 34 34
56947- 74 74 74 74 74 74 2 2 6 6 6 6
56948-144 144 144 198 198 198 190 190 190 178 166 146
56949-154 121 60 156 107 11 156 107 11 168 124 44
56950-174 154 114 187 187 187 190 190 190 210 210 210
56951-246 246 246 253 253 253 253 253 253 182 182 182
56952- 6 6 6 2 2 6 2 2 6 2 2 6
56953- 2 2 6 2 2 6 2 2 6 62 62 62
56954- 74 74 74 34 34 34 14 14 14 0 0 0
56955- 0 0 0 0 0 0 0 0 0 0 0 0
56956- 0 0 0 0 0 0 0 0 0 0 0 0
56957- 0 0 0 0 0 0 0 0 0 0 0 0
56958- 0 0 0 0 0 0 0 0 0 0 0 0
56959- 0 0 0 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 0 0 0
56961- 0 0 0 0 0 0 0 0 0 0 0 0
56962- 0 0 0 0 0 0 0 0 0 0 0 0
56963- 0 0 0 0 0 0 0 0 0 0 0 0
56964- 0 0 0 0 0 0 0 0 0 0 0 0
56965- 0 0 0 0 0 0 0 0 0 0 0 0
56966- 0 0 0 10 10 10 22 22 22 54 54 54
56967- 94 94 94 18 18 18 2 2 6 46 46 46
56968-234 234 234 221 221 221 190 190 190 190 190 190
56969-190 190 190 187 187 187 187 187 187 190 190 190
56970-190 190 190 195 195 195 214 214 214 242 242 242
56971-253 253 253 253 253 253 253 253 253 253 253 253
56972- 82 82 82 2 2 6 2 2 6 2 2 6
56973- 2 2 6 2 2 6 2 2 6 14 14 14
56974- 86 86 86 54 54 54 22 22 22 6 6 6
56975- 0 0 0 0 0 0 0 0 0 0 0 0
56976- 0 0 0 0 0 0 0 0 0 0 0 0
56977- 0 0 0 0 0 0 0 0 0 0 0 0
56978- 0 0 0 0 0 0 0 0 0 0 0 0
56979- 0 0 0 0 0 0 0 0 0 0 0 0
56980- 0 0 0 0 0 0 0 0 0 0 0 0
56981- 0 0 0 0 0 0 0 0 0 0 0 0
56982- 0 0 0 0 0 0 0 0 0 0 0 0
56983- 0 0 0 0 0 0 0 0 0 0 0 0
56984- 0 0 0 0 0 0 0 0 0 0 0 0
56985- 0 0 0 0 0 0 0 0 0 0 0 0
56986- 6 6 6 18 18 18 46 46 46 90 90 90
56987- 46 46 46 18 18 18 6 6 6 182 182 182
56988-253 253 253 246 246 246 206 206 206 190 190 190
56989-190 190 190 190 190 190 190 190 190 190 190 190
56990-206 206 206 231 231 231 250 250 250 253 253 253
56991-253 253 253 253 253 253 253 253 253 253 253 253
56992-202 202 202 14 14 14 2 2 6 2 2 6
56993- 2 2 6 2 2 6 2 2 6 2 2 6
56994- 42 42 42 86 86 86 42 42 42 18 18 18
56995- 6 6 6 0 0 0 0 0 0 0 0 0
56996- 0 0 0 0 0 0 0 0 0 0 0 0
56997- 0 0 0 0 0 0 0 0 0 0 0 0
56998- 0 0 0 0 0 0 0 0 0 0 0 0
56999- 0 0 0 0 0 0 0 0 0 0 0 0
57000- 0 0 0 0 0 0 0 0 0 0 0 0
57001- 0 0 0 0 0 0 0 0 0 0 0 0
57002- 0 0 0 0 0 0 0 0 0 0 0 0
57003- 0 0 0 0 0 0 0 0 0 0 0 0
57004- 0 0 0 0 0 0 0 0 0 0 0 0
57005- 0 0 0 0 0 0 0 0 0 6 6 6
57006- 14 14 14 38 38 38 74 74 74 66 66 66
57007- 2 2 6 6 6 6 90 90 90 250 250 250
57008-253 253 253 253 253 253 238 238 238 198 198 198
57009-190 190 190 190 190 190 195 195 195 221 221 221
57010-246 246 246 253 253 253 253 253 253 253 253 253
57011-253 253 253 253 253 253 253 253 253 253 253 253
57012-253 253 253 82 82 82 2 2 6 2 2 6
57013- 2 2 6 2 2 6 2 2 6 2 2 6
57014- 2 2 6 78 78 78 70 70 70 34 34 34
57015- 14 14 14 6 6 6 0 0 0 0 0 0
57016- 0 0 0 0 0 0 0 0 0 0 0 0
57017- 0 0 0 0 0 0 0 0 0 0 0 0
57018- 0 0 0 0 0 0 0 0 0 0 0 0
57019- 0 0 0 0 0 0 0 0 0 0 0 0
57020- 0 0 0 0 0 0 0 0 0 0 0 0
57021- 0 0 0 0 0 0 0 0 0 0 0 0
57022- 0 0 0 0 0 0 0 0 0 0 0 0
57023- 0 0 0 0 0 0 0 0 0 0 0 0
57024- 0 0 0 0 0 0 0 0 0 0 0 0
57025- 0 0 0 0 0 0 0 0 0 14 14 14
57026- 34 34 34 66 66 66 78 78 78 6 6 6
57027- 2 2 6 18 18 18 218 218 218 253 253 253
57028-253 253 253 253 253 253 253 253 253 246 246 246
57029-226 226 226 231 231 231 246 246 246 253 253 253
57030-253 253 253 253 253 253 253 253 253 253 253 253
57031-253 253 253 253 253 253 253 253 253 253 253 253
57032-253 253 253 178 178 178 2 2 6 2 2 6
57033- 2 2 6 2 2 6 2 2 6 2 2 6
57034- 2 2 6 18 18 18 90 90 90 62 62 62
57035- 30 30 30 10 10 10 0 0 0 0 0 0
57036- 0 0 0 0 0 0 0 0 0 0 0 0
57037- 0 0 0 0 0 0 0 0 0 0 0 0
57038- 0 0 0 0 0 0 0 0 0 0 0 0
57039- 0 0 0 0 0 0 0 0 0 0 0 0
57040- 0 0 0 0 0 0 0 0 0 0 0 0
57041- 0 0 0 0 0 0 0 0 0 0 0 0
57042- 0 0 0 0 0 0 0 0 0 0 0 0
57043- 0 0 0 0 0 0 0 0 0 0 0 0
57044- 0 0 0 0 0 0 0 0 0 0 0 0
57045- 0 0 0 0 0 0 10 10 10 26 26 26
57046- 58 58 58 90 90 90 18 18 18 2 2 6
57047- 2 2 6 110 110 110 253 253 253 253 253 253
57048-253 253 253 253 253 253 253 253 253 253 253 253
57049-250 250 250 253 253 253 253 253 253 253 253 253
57050-253 253 253 253 253 253 253 253 253 253 253 253
57051-253 253 253 253 253 253 253 253 253 253 253 253
57052-253 253 253 231 231 231 18 18 18 2 2 6
57053- 2 2 6 2 2 6 2 2 6 2 2 6
57054- 2 2 6 2 2 6 18 18 18 94 94 94
57055- 54 54 54 26 26 26 10 10 10 0 0 0
57056- 0 0 0 0 0 0 0 0 0 0 0 0
57057- 0 0 0 0 0 0 0 0 0 0 0 0
57058- 0 0 0 0 0 0 0 0 0 0 0 0
57059- 0 0 0 0 0 0 0 0 0 0 0 0
57060- 0 0 0 0 0 0 0 0 0 0 0 0
57061- 0 0 0 0 0 0 0 0 0 0 0 0
57062- 0 0 0 0 0 0 0 0 0 0 0 0
57063- 0 0 0 0 0 0 0 0 0 0 0 0
57064- 0 0 0 0 0 0 0 0 0 0 0 0
57065- 0 0 0 6 6 6 22 22 22 50 50 50
57066- 90 90 90 26 26 26 2 2 6 2 2 6
57067- 14 14 14 195 195 195 250 250 250 253 253 253
57068-253 253 253 253 253 253 253 253 253 253 253 253
57069-253 253 253 253 253 253 253 253 253 253 253 253
57070-253 253 253 253 253 253 253 253 253 253 253 253
57071-253 253 253 253 253 253 253 253 253 253 253 253
57072-250 250 250 242 242 242 54 54 54 2 2 6
57073- 2 2 6 2 2 6 2 2 6 2 2 6
57074- 2 2 6 2 2 6 2 2 6 38 38 38
57075- 86 86 86 50 50 50 22 22 22 6 6 6
57076- 0 0 0 0 0 0 0 0 0 0 0 0
57077- 0 0 0 0 0 0 0 0 0 0 0 0
57078- 0 0 0 0 0 0 0 0 0 0 0 0
57079- 0 0 0 0 0 0 0 0 0 0 0 0
57080- 0 0 0 0 0 0 0 0 0 0 0 0
57081- 0 0 0 0 0 0 0 0 0 0 0 0
57082- 0 0 0 0 0 0 0 0 0 0 0 0
57083- 0 0 0 0 0 0 0 0 0 0 0 0
57084- 0 0 0 0 0 0 0 0 0 0 0 0
57085- 6 6 6 14 14 14 38 38 38 82 82 82
57086- 34 34 34 2 2 6 2 2 6 2 2 6
57087- 42 42 42 195 195 195 246 246 246 253 253 253
57088-253 253 253 253 253 253 253 253 253 250 250 250
57089-242 242 242 242 242 242 250 250 250 253 253 253
57090-253 253 253 253 253 253 253 253 253 253 253 253
57091-253 253 253 250 250 250 246 246 246 238 238 238
57092-226 226 226 231 231 231 101 101 101 6 6 6
57093- 2 2 6 2 2 6 2 2 6 2 2 6
57094- 2 2 6 2 2 6 2 2 6 2 2 6
57095- 38 38 38 82 82 82 42 42 42 14 14 14
57096- 6 6 6 0 0 0 0 0 0 0 0 0
57097- 0 0 0 0 0 0 0 0 0 0 0 0
57098- 0 0 0 0 0 0 0 0 0 0 0 0
57099- 0 0 0 0 0 0 0 0 0 0 0 0
57100- 0 0 0 0 0 0 0 0 0 0 0 0
57101- 0 0 0 0 0 0 0 0 0 0 0 0
57102- 0 0 0 0 0 0 0 0 0 0 0 0
57103- 0 0 0 0 0 0 0 0 0 0 0 0
57104- 0 0 0 0 0 0 0 0 0 0 0 0
57105- 10 10 10 26 26 26 62 62 62 66 66 66
57106- 2 2 6 2 2 6 2 2 6 6 6 6
57107- 70 70 70 170 170 170 206 206 206 234 234 234
57108-246 246 246 250 250 250 250 250 250 238 238 238
57109-226 226 226 231 231 231 238 238 238 250 250 250
57110-250 250 250 250 250 250 246 246 246 231 231 231
57111-214 214 214 206 206 206 202 202 202 202 202 202
57112-198 198 198 202 202 202 182 182 182 18 18 18
57113- 2 2 6 2 2 6 2 2 6 2 2 6
57114- 2 2 6 2 2 6 2 2 6 2 2 6
57115- 2 2 6 62 62 62 66 66 66 30 30 30
57116- 10 10 10 0 0 0 0 0 0 0 0 0
57117- 0 0 0 0 0 0 0 0 0 0 0 0
57118- 0 0 0 0 0 0 0 0 0 0 0 0
57119- 0 0 0 0 0 0 0 0 0 0 0 0
57120- 0 0 0 0 0 0 0 0 0 0 0 0
57121- 0 0 0 0 0 0 0 0 0 0 0 0
57122- 0 0 0 0 0 0 0 0 0 0 0 0
57123- 0 0 0 0 0 0 0 0 0 0 0 0
57124- 0 0 0 0 0 0 0 0 0 0 0 0
57125- 14 14 14 42 42 42 82 82 82 18 18 18
57126- 2 2 6 2 2 6 2 2 6 10 10 10
57127- 94 94 94 182 182 182 218 218 218 242 242 242
57128-250 250 250 253 253 253 253 253 253 250 250 250
57129-234 234 234 253 253 253 253 253 253 253 253 253
57130-253 253 253 253 253 253 253 253 253 246 246 246
57131-238 238 238 226 226 226 210 210 210 202 202 202
57132-195 195 195 195 195 195 210 210 210 158 158 158
57133- 6 6 6 14 14 14 50 50 50 14 14 14
57134- 2 2 6 2 2 6 2 2 6 2 2 6
57135- 2 2 6 6 6 6 86 86 86 46 46 46
57136- 18 18 18 6 6 6 0 0 0 0 0 0
57137- 0 0 0 0 0 0 0 0 0 0 0 0
57138- 0 0 0 0 0 0 0 0 0 0 0 0
57139- 0 0 0 0 0 0 0 0 0 0 0 0
57140- 0 0 0 0 0 0 0 0 0 0 0 0
57141- 0 0 0 0 0 0 0 0 0 0 0 0
57142- 0 0 0 0 0 0 0 0 0 0 0 0
57143- 0 0 0 0 0 0 0 0 0 0 0 0
57144- 0 0 0 0 0 0 0 0 0 6 6 6
57145- 22 22 22 54 54 54 70 70 70 2 2 6
57146- 2 2 6 10 10 10 2 2 6 22 22 22
57147-166 166 166 231 231 231 250 250 250 253 253 253
57148-253 253 253 253 253 253 253 253 253 250 250 250
57149-242 242 242 253 253 253 253 253 253 253 253 253
57150-253 253 253 253 253 253 253 253 253 253 253 253
57151-253 253 253 253 253 253 253 253 253 246 246 246
57152-231 231 231 206 206 206 198 198 198 226 226 226
57153- 94 94 94 2 2 6 6 6 6 38 38 38
57154- 30 30 30 2 2 6 2 2 6 2 2 6
57155- 2 2 6 2 2 6 62 62 62 66 66 66
57156- 26 26 26 10 10 10 0 0 0 0 0 0
57157- 0 0 0 0 0 0 0 0 0 0 0 0
57158- 0 0 0 0 0 0 0 0 0 0 0 0
57159- 0 0 0 0 0 0 0 0 0 0 0 0
57160- 0 0 0 0 0 0 0 0 0 0 0 0
57161- 0 0 0 0 0 0 0 0 0 0 0 0
57162- 0 0 0 0 0 0 0 0 0 0 0 0
57163- 0 0 0 0 0 0 0 0 0 0 0 0
57164- 0 0 0 0 0 0 0 0 0 10 10 10
57165- 30 30 30 74 74 74 50 50 50 2 2 6
57166- 26 26 26 26 26 26 2 2 6 106 106 106
57167-238 238 238 253 253 253 253 253 253 253 253 253
57168-253 253 253 253 253 253 253 253 253 253 253 253
57169-253 253 253 253 253 253 253 253 253 253 253 253
57170-253 253 253 253 253 253 253 253 253 253 253 253
57171-253 253 253 253 253 253 253 253 253 253 253 253
57172-253 253 253 246 246 246 218 218 218 202 202 202
57173-210 210 210 14 14 14 2 2 6 2 2 6
57174- 30 30 30 22 22 22 2 2 6 2 2 6
57175- 2 2 6 2 2 6 18 18 18 86 86 86
57176- 42 42 42 14 14 14 0 0 0 0 0 0
57177- 0 0 0 0 0 0 0 0 0 0 0 0
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 0 0 0 0 0 0 0 0 0 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 0 0 0
57183- 0 0 0 0 0 0 0 0 0 0 0 0
57184- 0 0 0 0 0 0 0 0 0 14 14 14
57185- 42 42 42 90 90 90 22 22 22 2 2 6
57186- 42 42 42 2 2 6 18 18 18 218 218 218
57187-253 253 253 253 253 253 253 253 253 253 253 253
57188-253 253 253 253 253 253 253 253 253 253 253 253
57189-253 253 253 253 253 253 253 253 253 253 253 253
57190-253 253 253 253 253 253 253 253 253 253 253 253
57191-253 253 253 253 253 253 253 253 253 253 253 253
57192-253 253 253 253 253 253 250 250 250 221 221 221
57193-218 218 218 101 101 101 2 2 6 14 14 14
57194- 18 18 18 38 38 38 10 10 10 2 2 6
57195- 2 2 6 2 2 6 2 2 6 78 78 78
57196- 58 58 58 22 22 22 6 6 6 0 0 0
57197- 0 0 0 0 0 0 0 0 0 0 0 0
57198- 0 0 0 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 0 0 0 0 0 0
57200- 0 0 0 0 0 0 0 0 0 0 0 0
57201- 0 0 0 0 0 0 0 0 0 0 0 0
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 0 0 0 0 0 0 0 0 0
57204- 0 0 0 0 0 0 6 6 6 18 18 18
57205- 54 54 54 82 82 82 2 2 6 26 26 26
57206- 22 22 22 2 2 6 123 123 123 253 253 253
57207-253 253 253 253 253 253 253 253 253 253 253 253
57208-253 253 253 253 253 253 253 253 253 253 253 253
57209-253 253 253 253 253 253 253 253 253 253 253 253
57210-253 253 253 253 253 253 253 253 253 253 253 253
57211-253 253 253 253 253 253 253 253 253 253 253 253
57212-253 253 253 253 253 253 253 253 253 250 250 250
57213-238 238 238 198 198 198 6 6 6 38 38 38
57214- 58 58 58 26 26 26 38 38 38 2 2 6
57215- 2 2 6 2 2 6 2 2 6 46 46 46
57216- 78 78 78 30 30 30 10 10 10 0 0 0
57217- 0 0 0 0 0 0 0 0 0 0 0 0
57218- 0 0 0 0 0 0 0 0 0 0 0 0
57219- 0 0 0 0 0 0 0 0 0 0 0 0
57220- 0 0 0 0 0 0 0 0 0 0 0 0
57221- 0 0 0 0 0 0 0 0 0 0 0 0
57222- 0 0 0 0 0 0 0 0 0 0 0 0
57223- 0 0 0 0 0 0 0 0 0 0 0 0
57224- 0 0 0 0 0 0 10 10 10 30 30 30
57225- 74 74 74 58 58 58 2 2 6 42 42 42
57226- 2 2 6 22 22 22 231 231 231 253 253 253
57227-253 253 253 253 253 253 253 253 253 253 253 253
57228-253 253 253 253 253 253 253 253 253 250 250 250
57229-253 253 253 253 253 253 253 253 253 253 253 253
57230-253 253 253 253 253 253 253 253 253 253 253 253
57231-253 253 253 253 253 253 253 253 253 253 253 253
57232-253 253 253 253 253 253 253 253 253 253 253 253
57233-253 253 253 246 246 246 46 46 46 38 38 38
57234- 42 42 42 14 14 14 38 38 38 14 14 14
57235- 2 2 6 2 2 6 2 2 6 6 6 6
57236- 86 86 86 46 46 46 14 14 14 0 0 0
57237- 0 0 0 0 0 0 0 0 0 0 0 0
57238- 0 0 0 0 0 0 0 0 0 0 0 0
57239- 0 0 0 0 0 0 0 0 0 0 0 0
57240- 0 0 0 0 0 0 0 0 0 0 0 0
57241- 0 0 0 0 0 0 0 0 0 0 0 0
57242- 0 0 0 0 0 0 0 0 0 0 0 0
57243- 0 0 0 0 0 0 0 0 0 0 0 0
57244- 0 0 0 6 6 6 14 14 14 42 42 42
57245- 90 90 90 18 18 18 18 18 18 26 26 26
57246- 2 2 6 116 116 116 253 253 253 253 253 253
57247-253 253 253 253 253 253 253 253 253 253 253 253
57248-253 253 253 253 253 253 250 250 250 238 238 238
57249-253 253 253 253 253 253 253 253 253 253 253 253
57250-253 253 253 253 253 253 253 253 253 253 253 253
57251-253 253 253 253 253 253 253 253 253 253 253 253
57252-253 253 253 253 253 253 253 253 253 253 253 253
57253-253 253 253 253 253 253 94 94 94 6 6 6
57254- 2 2 6 2 2 6 10 10 10 34 34 34
57255- 2 2 6 2 2 6 2 2 6 2 2 6
57256- 74 74 74 58 58 58 22 22 22 6 6 6
57257- 0 0 0 0 0 0 0 0 0 0 0 0
57258- 0 0 0 0 0 0 0 0 0 0 0 0
57259- 0 0 0 0 0 0 0 0 0 0 0 0
57260- 0 0 0 0 0 0 0 0 0 0 0 0
57261- 0 0 0 0 0 0 0 0 0 0 0 0
57262- 0 0 0 0 0 0 0 0 0 0 0 0
57263- 0 0 0 0 0 0 0 0 0 0 0 0
57264- 0 0 0 10 10 10 26 26 26 66 66 66
57265- 82 82 82 2 2 6 38 38 38 6 6 6
57266- 14 14 14 210 210 210 253 253 253 253 253 253
57267-253 253 253 253 253 253 253 253 253 253 253 253
57268-253 253 253 253 253 253 246 246 246 242 242 242
57269-253 253 253 253 253 253 253 253 253 253 253 253
57270-253 253 253 253 253 253 253 253 253 253 253 253
57271-253 253 253 253 253 253 253 253 253 253 253 253
57272-253 253 253 253 253 253 253 253 253 253 253 253
57273-253 253 253 253 253 253 144 144 144 2 2 6
57274- 2 2 6 2 2 6 2 2 6 46 46 46
57275- 2 2 6 2 2 6 2 2 6 2 2 6
57276- 42 42 42 74 74 74 30 30 30 10 10 10
57277- 0 0 0 0 0 0 0 0 0 0 0 0
57278- 0 0 0 0 0 0 0 0 0 0 0 0
57279- 0 0 0 0 0 0 0 0 0 0 0 0
57280- 0 0 0 0 0 0 0 0 0 0 0 0
57281- 0 0 0 0 0 0 0 0 0 0 0 0
57282- 0 0 0 0 0 0 0 0 0 0 0 0
57283- 0 0 0 0 0 0 0 0 0 0 0 0
57284- 6 6 6 14 14 14 42 42 42 90 90 90
57285- 26 26 26 6 6 6 42 42 42 2 2 6
57286- 74 74 74 250 250 250 253 253 253 253 253 253
57287-253 253 253 253 253 253 253 253 253 253 253 253
57288-253 253 253 253 253 253 242 242 242 242 242 242
57289-253 253 253 253 253 253 253 253 253 253 253 253
57290-253 253 253 253 253 253 253 253 253 253 253 253
57291-253 253 253 253 253 253 253 253 253 253 253 253
57292-253 253 253 253 253 253 253 253 253 253 253 253
57293-253 253 253 253 253 253 182 182 182 2 2 6
57294- 2 2 6 2 2 6 2 2 6 46 46 46
57295- 2 2 6 2 2 6 2 2 6 2 2 6
57296- 10 10 10 86 86 86 38 38 38 10 10 10
57297- 0 0 0 0 0 0 0 0 0 0 0 0
57298- 0 0 0 0 0 0 0 0 0 0 0 0
57299- 0 0 0 0 0 0 0 0 0 0 0 0
57300- 0 0 0 0 0 0 0 0 0 0 0 0
57301- 0 0 0 0 0 0 0 0 0 0 0 0
57302- 0 0 0 0 0 0 0 0 0 0 0 0
57303- 0 0 0 0 0 0 0 0 0 0 0 0
57304- 10 10 10 26 26 26 66 66 66 82 82 82
57305- 2 2 6 22 22 22 18 18 18 2 2 6
57306-149 149 149 253 253 253 253 253 253 253 253 253
57307-253 253 253 253 253 253 253 253 253 253 253 253
57308-253 253 253 253 253 253 234 234 234 242 242 242
57309-253 253 253 253 253 253 253 253 253 253 253 253
57310-253 253 253 253 253 253 253 253 253 253 253 253
57311-253 253 253 253 253 253 253 253 253 253 253 253
57312-253 253 253 253 253 253 253 253 253 253 253 253
57313-253 253 253 253 253 253 206 206 206 2 2 6
57314- 2 2 6 2 2 6 2 2 6 38 38 38
57315- 2 2 6 2 2 6 2 2 6 2 2 6
57316- 6 6 6 86 86 86 46 46 46 14 14 14
57317- 0 0 0 0 0 0 0 0 0 0 0 0
57318- 0 0 0 0 0 0 0 0 0 0 0 0
57319- 0 0 0 0 0 0 0 0 0 0 0 0
57320- 0 0 0 0 0 0 0 0 0 0 0 0
57321- 0 0 0 0 0 0 0 0 0 0 0 0
57322- 0 0 0 0 0 0 0 0 0 0 0 0
57323- 0 0 0 0 0 0 0 0 0 6 6 6
57324- 18 18 18 46 46 46 86 86 86 18 18 18
57325- 2 2 6 34 34 34 10 10 10 6 6 6
57326-210 210 210 253 253 253 253 253 253 253 253 253
57327-253 253 253 253 253 253 253 253 253 253 253 253
57328-253 253 253 253 253 253 234 234 234 242 242 242
57329-253 253 253 253 253 253 253 253 253 253 253 253
57330-253 253 253 253 253 253 253 253 253 253 253 253
57331-253 253 253 253 253 253 253 253 253 253 253 253
57332-253 253 253 253 253 253 253 253 253 253 253 253
57333-253 253 253 253 253 253 221 221 221 6 6 6
57334- 2 2 6 2 2 6 6 6 6 30 30 30
57335- 2 2 6 2 2 6 2 2 6 2 2 6
57336- 2 2 6 82 82 82 54 54 54 18 18 18
57337- 6 6 6 0 0 0 0 0 0 0 0 0
57338- 0 0 0 0 0 0 0 0 0 0 0 0
57339- 0 0 0 0 0 0 0 0 0 0 0 0
57340- 0 0 0 0 0 0 0 0 0 0 0 0
57341- 0 0 0 0 0 0 0 0 0 0 0 0
57342- 0 0 0 0 0 0 0 0 0 0 0 0
57343- 0 0 0 0 0 0 0 0 0 10 10 10
57344- 26 26 26 66 66 66 62 62 62 2 2 6
57345- 2 2 6 38 38 38 10 10 10 26 26 26
57346-238 238 238 253 253 253 253 253 253 253 253 253
57347-253 253 253 253 253 253 253 253 253 253 253 253
57348-253 253 253 253 253 253 231 231 231 238 238 238
57349-253 253 253 253 253 253 253 253 253 253 253 253
57350-253 253 253 253 253 253 253 253 253 253 253 253
57351-253 253 253 253 253 253 253 253 253 253 253 253
57352-253 253 253 253 253 253 253 253 253 253 253 253
57353-253 253 253 253 253 253 231 231 231 6 6 6
57354- 2 2 6 2 2 6 10 10 10 30 30 30
57355- 2 2 6 2 2 6 2 2 6 2 2 6
57356- 2 2 6 66 66 66 58 58 58 22 22 22
57357- 6 6 6 0 0 0 0 0 0 0 0 0
57358- 0 0 0 0 0 0 0 0 0 0 0 0
57359- 0 0 0 0 0 0 0 0 0 0 0 0
57360- 0 0 0 0 0 0 0 0 0 0 0 0
57361- 0 0 0 0 0 0 0 0 0 0 0 0
57362- 0 0 0 0 0 0 0 0 0 0 0 0
57363- 0 0 0 0 0 0 0 0 0 10 10 10
57364- 38 38 38 78 78 78 6 6 6 2 2 6
57365- 2 2 6 46 46 46 14 14 14 42 42 42
57366-246 246 246 253 253 253 253 253 253 253 253 253
57367-253 253 253 253 253 253 253 253 253 253 253 253
57368-253 253 253 253 253 253 231 231 231 242 242 242
57369-253 253 253 253 253 253 253 253 253 253 253 253
57370-253 253 253 253 253 253 253 253 253 253 253 253
57371-253 253 253 253 253 253 253 253 253 253 253 253
57372-253 253 253 253 253 253 253 253 253 253 253 253
57373-253 253 253 253 253 253 234 234 234 10 10 10
57374- 2 2 6 2 2 6 22 22 22 14 14 14
57375- 2 2 6 2 2 6 2 2 6 2 2 6
57376- 2 2 6 66 66 66 62 62 62 22 22 22
57377- 6 6 6 0 0 0 0 0 0 0 0 0
57378- 0 0 0 0 0 0 0 0 0 0 0 0
57379- 0 0 0 0 0 0 0 0 0 0 0 0
57380- 0 0 0 0 0 0 0 0 0 0 0 0
57381- 0 0 0 0 0 0 0 0 0 0 0 0
57382- 0 0 0 0 0 0 0 0 0 0 0 0
57383- 0 0 0 0 0 0 6 6 6 18 18 18
57384- 50 50 50 74 74 74 2 2 6 2 2 6
57385- 14 14 14 70 70 70 34 34 34 62 62 62
57386-250 250 250 253 253 253 253 253 253 253 253 253
57387-253 253 253 253 253 253 253 253 253 253 253 253
57388-253 253 253 253 253 253 231 231 231 246 246 246
57389-253 253 253 253 253 253 253 253 253 253 253 253
57390-253 253 253 253 253 253 253 253 253 253 253 253
57391-253 253 253 253 253 253 253 253 253 253 253 253
57392-253 253 253 253 253 253 253 253 253 253 253 253
57393-253 253 253 253 253 253 234 234 234 14 14 14
57394- 2 2 6 2 2 6 30 30 30 2 2 6
57395- 2 2 6 2 2 6 2 2 6 2 2 6
57396- 2 2 6 66 66 66 62 62 62 22 22 22
57397- 6 6 6 0 0 0 0 0 0 0 0 0
57398- 0 0 0 0 0 0 0 0 0 0 0 0
57399- 0 0 0 0 0 0 0 0 0 0 0 0
57400- 0 0 0 0 0 0 0 0 0 0 0 0
57401- 0 0 0 0 0 0 0 0 0 0 0 0
57402- 0 0 0 0 0 0 0 0 0 0 0 0
57403- 0 0 0 0 0 0 6 6 6 18 18 18
57404- 54 54 54 62 62 62 2 2 6 2 2 6
57405- 2 2 6 30 30 30 46 46 46 70 70 70
57406-250 250 250 253 253 253 253 253 253 253 253 253
57407-253 253 253 253 253 253 253 253 253 253 253 253
57408-253 253 253 253 253 253 231 231 231 246 246 246
57409-253 253 253 253 253 253 253 253 253 253 253 253
57410-253 253 253 253 253 253 253 253 253 253 253 253
57411-253 253 253 253 253 253 253 253 253 253 253 253
57412-253 253 253 253 253 253 253 253 253 253 253 253
57413-253 253 253 253 253 253 226 226 226 10 10 10
57414- 2 2 6 6 6 6 30 30 30 2 2 6
57415- 2 2 6 2 2 6 2 2 6 2 2 6
57416- 2 2 6 66 66 66 58 58 58 22 22 22
57417- 6 6 6 0 0 0 0 0 0 0 0 0
57418- 0 0 0 0 0 0 0 0 0 0 0 0
57419- 0 0 0 0 0 0 0 0 0 0 0 0
57420- 0 0 0 0 0 0 0 0 0 0 0 0
57421- 0 0 0 0 0 0 0 0 0 0 0 0
57422- 0 0 0 0 0 0 0 0 0 0 0 0
57423- 0 0 0 0 0 0 6 6 6 22 22 22
57424- 58 58 58 62 62 62 2 2 6 2 2 6
57425- 2 2 6 2 2 6 30 30 30 78 78 78
57426-250 250 250 253 253 253 253 253 253 253 253 253
57427-253 253 253 253 253 253 253 253 253 253 253 253
57428-253 253 253 253 253 253 231 231 231 246 246 246
57429-253 253 253 253 253 253 253 253 253 253 253 253
57430-253 253 253 253 253 253 253 253 253 253 253 253
57431-253 253 253 253 253 253 253 253 253 253 253 253
57432-253 253 253 253 253 253 253 253 253 253 253 253
57433-253 253 253 253 253 253 206 206 206 2 2 6
57434- 22 22 22 34 34 34 18 14 6 22 22 22
57435- 26 26 26 18 18 18 6 6 6 2 2 6
57436- 2 2 6 82 82 82 54 54 54 18 18 18
57437- 6 6 6 0 0 0 0 0 0 0 0 0
57438- 0 0 0 0 0 0 0 0 0 0 0 0
57439- 0 0 0 0 0 0 0 0 0 0 0 0
57440- 0 0 0 0 0 0 0 0 0 0 0 0
57441- 0 0 0 0 0 0 0 0 0 0 0 0
57442- 0 0 0 0 0 0 0 0 0 0 0 0
57443- 0 0 0 0 0 0 6 6 6 26 26 26
57444- 62 62 62 106 106 106 74 54 14 185 133 11
57445-210 162 10 121 92 8 6 6 6 62 62 62
57446-238 238 238 253 253 253 253 253 253 253 253 253
57447-253 253 253 253 253 253 253 253 253 253 253 253
57448-253 253 253 253 253 253 231 231 231 246 246 246
57449-253 253 253 253 253 253 253 253 253 253 253 253
57450-253 253 253 253 253 253 253 253 253 253 253 253
57451-253 253 253 253 253 253 253 253 253 253 253 253
57452-253 253 253 253 253 253 253 253 253 253 253 253
57453-253 253 253 253 253 253 158 158 158 18 18 18
57454- 14 14 14 2 2 6 2 2 6 2 2 6
57455- 6 6 6 18 18 18 66 66 66 38 38 38
57456- 6 6 6 94 94 94 50 50 50 18 18 18
57457- 6 6 6 0 0 0 0 0 0 0 0 0
57458- 0 0 0 0 0 0 0 0 0 0 0 0
57459- 0 0 0 0 0 0 0 0 0 0 0 0
57460- 0 0 0 0 0 0 0 0 0 0 0 0
57461- 0 0 0 0 0 0 0 0 0 0 0 0
57462- 0 0 0 0 0 0 0 0 0 6 6 6
57463- 10 10 10 10 10 10 18 18 18 38 38 38
57464- 78 78 78 142 134 106 216 158 10 242 186 14
57465-246 190 14 246 190 14 156 118 10 10 10 10
57466- 90 90 90 238 238 238 253 253 253 253 253 253
57467-253 253 253 253 253 253 253 253 253 253 253 253
57468-253 253 253 253 253 253 231 231 231 250 250 250
57469-253 253 253 253 253 253 253 253 253 253 253 253
57470-253 253 253 253 253 253 253 253 253 253 253 253
57471-253 253 253 253 253 253 253 253 253 253 253 253
57472-253 253 253 253 253 253 253 253 253 246 230 190
57473-238 204 91 238 204 91 181 142 44 37 26 9
57474- 2 2 6 2 2 6 2 2 6 2 2 6
57475- 2 2 6 2 2 6 38 38 38 46 46 46
57476- 26 26 26 106 106 106 54 54 54 18 18 18
57477- 6 6 6 0 0 0 0 0 0 0 0 0
57478- 0 0 0 0 0 0 0 0 0 0 0 0
57479- 0 0 0 0 0 0 0 0 0 0 0 0
57480- 0 0 0 0 0 0 0 0 0 0 0 0
57481- 0 0 0 0 0 0 0 0 0 0 0 0
57482- 0 0 0 6 6 6 14 14 14 22 22 22
57483- 30 30 30 38 38 38 50 50 50 70 70 70
57484-106 106 106 190 142 34 226 170 11 242 186 14
57485-246 190 14 246 190 14 246 190 14 154 114 10
57486- 6 6 6 74 74 74 226 226 226 253 253 253
57487-253 253 253 253 253 253 253 253 253 253 253 253
57488-253 253 253 253 253 253 231 231 231 250 250 250
57489-253 253 253 253 253 253 253 253 253 253 253 253
57490-253 253 253 253 253 253 253 253 253 253 253 253
57491-253 253 253 253 253 253 253 253 253 253 253 253
57492-253 253 253 253 253 253 253 253 253 228 184 62
57493-241 196 14 241 208 19 232 195 16 38 30 10
57494- 2 2 6 2 2 6 2 2 6 2 2 6
57495- 2 2 6 6 6 6 30 30 30 26 26 26
57496-203 166 17 154 142 90 66 66 66 26 26 26
57497- 6 6 6 0 0 0 0 0 0 0 0 0
57498- 0 0 0 0 0 0 0 0 0 0 0 0
57499- 0 0 0 0 0 0 0 0 0 0 0 0
57500- 0 0 0 0 0 0 0 0 0 0 0 0
57501- 0 0 0 0 0 0 0 0 0 0 0 0
57502- 6 6 6 18 18 18 38 38 38 58 58 58
57503- 78 78 78 86 86 86 101 101 101 123 123 123
57504-175 146 61 210 150 10 234 174 13 246 186 14
57505-246 190 14 246 190 14 246 190 14 238 190 10
57506-102 78 10 2 2 6 46 46 46 198 198 198
57507-253 253 253 253 253 253 253 253 253 253 253 253
57508-253 253 253 253 253 253 234 234 234 242 242 242
57509-253 253 253 253 253 253 253 253 253 253 253 253
57510-253 253 253 253 253 253 253 253 253 253 253 253
57511-253 253 253 253 253 253 253 253 253 253 253 253
57512-253 253 253 253 253 253 253 253 253 224 178 62
57513-242 186 14 241 196 14 210 166 10 22 18 6
57514- 2 2 6 2 2 6 2 2 6 2 2 6
57515- 2 2 6 2 2 6 6 6 6 121 92 8
57516-238 202 15 232 195 16 82 82 82 34 34 34
57517- 10 10 10 0 0 0 0 0 0 0 0 0
57518- 0 0 0 0 0 0 0 0 0 0 0 0
57519- 0 0 0 0 0 0 0 0 0 0 0 0
57520- 0 0 0 0 0 0 0 0 0 0 0 0
57521- 0 0 0 0 0 0 0 0 0 0 0 0
57522- 14 14 14 38 38 38 70 70 70 154 122 46
57523-190 142 34 200 144 11 197 138 11 197 138 11
57524-213 154 11 226 170 11 242 186 14 246 190 14
57525-246 190 14 246 190 14 246 190 14 246 190 14
57526-225 175 15 46 32 6 2 2 6 22 22 22
57527-158 158 158 250 250 250 253 253 253 253 253 253
57528-253 253 253 253 253 253 253 253 253 253 253 253
57529-253 253 253 253 253 253 253 253 253 253 253 253
57530-253 253 253 253 253 253 253 253 253 253 253 253
57531-253 253 253 253 253 253 253 253 253 253 253 253
57532-253 253 253 250 250 250 242 242 242 224 178 62
57533-239 182 13 236 186 11 213 154 11 46 32 6
57534- 2 2 6 2 2 6 2 2 6 2 2 6
57535- 2 2 6 2 2 6 61 42 6 225 175 15
57536-238 190 10 236 186 11 112 100 78 42 42 42
57537- 14 14 14 0 0 0 0 0 0 0 0 0
57538- 0 0 0 0 0 0 0 0 0 0 0 0
57539- 0 0 0 0 0 0 0 0 0 0 0 0
57540- 0 0 0 0 0 0 0 0 0 0 0 0
57541- 0 0 0 0 0 0 0 0 0 6 6 6
57542- 22 22 22 54 54 54 154 122 46 213 154 11
57543-226 170 11 230 174 11 226 170 11 226 170 11
57544-236 178 12 242 186 14 246 190 14 246 190 14
57545-246 190 14 246 190 14 246 190 14 246 190 14
57546-241 196 14 184 144 12 10 10 10 2 2 6
57547- 6 6 6 116 116 116 242 242 242 253 253 253
57548-253 253 253 253 253 253 253 253 253 253 253 253
57549-253 253 253 253 253 253 253 253 253 253 253 253
57550-253 253 253 253 253 253 253 253 253 253 253 253
57551-253 253 253 253 253 253 253 253 253 253 253 253
57552-253 253 253 231 231 231 198 198 198 214 170 54
57553-236 178 12 236 178 12 210 150 10 137 92 6
57554- 18 14 6 2 2 6 2 2 6 2 2 6
57555- 6 6 6 70 47 6 200 144 11 236 178 12
57556-239 182 13 239 182 13 124 112 88 58 58 58
57557- 22 22 22 6 6 6 0 0 0 0 0 0
57558- 0 0 0 0 0 0 0 0 0 0 0 0
57559- 0 0 0 0 0 0 0 0 0 0 0 0
57560- 0 0 0 0 0 0 0 0 0 0 0 0
57561- 0 0 0 0 0 0 0 0 0 10 10 10
57562- 30 30 30 70 70 70 180 133 36 226 170 11
57563-239 182 13 242 186 14 242 186 14 246 186 14
57564-246 190 14 246 190 14 246 190 14 246 190 14
57565-246 190 14 246 190 14 246 190 14 246 190 14
57566-246 190 14 232 195 16 98 70 6 2 2 6
57567- 2 2 6 2 2 6 66 66 66 221 221 221
57568-253 253 253 253 253 253 253 253 253 253 253 253
57569-253 253 253 253 253 253 253 253 253 253 253 253
57570-253 253 253 253 253 253 253 253 253 253 253 253
57571-253 253 253 253 253 253 253 253 253 253 253 253
57572-253 253 253 206 206 206 198 198 198 214 166 58
57573-230 174 11 230 174 11 216 158 10 192 133 9
57574-163 110 8 116 81 8 102 78 10 116 81 8
57575-167 114 7 197 138 11 226 170 11 239 182 13
57576-242 186 14 242 186 14 162 146 94 78 78 78
57577- 34 34 34 14 14 14 6 6 6 0 0 0
57578- 0 0 0 0 0 0 0 0 0 0 0 0
57579- 0 0 0 0 0 0 0 0 0 0 0 0
57580- 0 0 0 0 0 0 0 0 0 0 0 0
57581- 0 0 0 0 0 0 0 0 0 6 6 6
57582- 30 30 30 78 78 78 190 142 34 226 170 11
57583-239 182 13 246 190 14 246 190 14 246 190 14
57584-246 190 14 246 190 14 246 190 14 246 190 14
57585-246 190 14 246 190 14 246 190 14 246 190 14
57586-246 190 14 241 196 14 203 166 17 22 18 6
57587- 2 2 6 2 2 6 2 2 6 38 38 38
57588-218 218 218 253 253 253 253 253 253 253 253 253
57589-253 253 253 253 253 253 253 253 253 253 253 253
57590-253 253 253 253 253 253 253 253 253 253 253 253
57591-253 253 253 253 253 253 253 253 253 253 253 253
57592-250 250 250 206 206 206 198 198 198 202 162 69
57593-226 170 11 236 178 12 224 166 10 210 150 10
57594-200 144 11 197 138 11 192 133 9 197 138 11
57595-210 150 10 226 170 11 242 186 14 246 190 14
57596-246 190 14 246 186 14 225 175 15 124 112 88
57597- 62 62 62 30 30 30 14 14 14 6 6 6
57598- 0 0 0 0 0 0 0 0 0 0 0 0
57599- 0 0 0 0 0 0 0 0 0 0 0 0
57600- 0 0 0 0 0 0 0 0 0 0 0 0
57601- 0 0 0 0 0 0 0 0 0 10 10 10
57602- 30 30 30 78 78 78 174 135 50 224 166 10
57603-239 182 13 246 190 14 246 190 14 246 190 14
57604-246 190 14 246 190 14 246 190 14 246 190 14
57605-246 190 14 246 190 14 246 190 14 246 190 14
57606-246 190 14 246 190 14 241 196 14 139 102 15
57607- 2 2 6 2 2 6 2 2 6 2 2 6
57608- 78 78 78 250 250 250 253 253 253 253 253 253
57609-253 253 253 253 253 253 253 253 253 253 253 253
57610-253 253 253 253 253 253 253 253 253 253 253 253
57611-253 253 253 253 253 253 253 253 253 253 253 253
57612-250 250 250 214 214 214 198 198 198 190 150 46
57613-219 162 10 236 178 12 234 174 13 224 166 10
57614-216 158 10 213 154 11 213 154 11 216 158 10
57615-226 170 11 239 182 13 246 190 14 246 190 14
57616-246 190 14 246 190 14 242 186 14 206 162 42
57617-101 101 101 58 58 58 30 30 30 14 14 14
57618- 6 6 6 0 0 0 0 0 0 0 0 0
57619- 0 0 0 0 0 0 0 0 0 0 0 0
57620- 0 0 0 0 0 0 0 0 0 0 0 0
57621- 0 0 0 0 0 0 0 0 0 10 10 10
57622- 30 30 30 74 74 74 174 135 50 216 158 10
57623-236 178 12 246 190 14 246 190 14 246 190 14
57624-246 190 14 246 190 14 246 190 14 246 190 14
57625-246 190 14 246 190 14 246 190 14 246 190 14
57626-246 190 14 246 190 14 241 196 14 226 184 13
57627- 61 42 6 2 2 6 2 2 6 2 2 6
57628- 22 22 22 238 238 238 253 253 253 253 253 253
57629-253 253 253 253 253 253 253 253 253 253 253 253
57630-253 253 253 253 253 253 253 253 253 253 253 253
57631-253 253 253 253 253 253 253 253 253 253 253 253
57632-253 253 253 226 226 226 187 187 187 180 133 36
57633-216 158 10 236 178 12 239 182 13 236 178 12
57634-230 174 11 226 170 11 226 170 11 230 174 11
57635-236 178 12 242 186 14 246 190 14 246 190 14
57636-246 190 14 246 190 14 246 186 14 239 182 13
57637-206 162 42 106 106 106 66 66 66 34 34 34
57638- 14 14 14 6 6 6 0 0 0 0 0 0
57639- 0 0 0 0 0 0 0 0 0 0 0 0
57640- 0 0 0 0 0 0 0 0 0 0 0 0
57641- 0 0 0 0 0 0 0 0 0 6 6 6
57642- 26 26 26 70 70 70 163 133 67 213 154 11
57643-236 178 12 246 190 14 246 190 14 246 190 14
57644-246 190 14 246 190 14 246 190 14 246 190 14
57645-246 190 14 246 190 14 246 190 14 246 190 14
57646-246 190 14 246 190 14 246 190 14 241 196 14
57647-190 146 13 18 14 6 2 2 6 2 2 6
57648- 46 46 46 246 246 246 253 253 253 253 253 253
57649-253 253 253 253 253 253 253 253 253 253 253 253
57650-253 253 253 253 253 253 253 253 253 253 253 253
57651-253 253 253 253 253 253 253 253 253 253 253 253
57652-253 253 253 221 221 221 86 86 86 156 107 11
57653-216 158 10 236 178 12 242 186 14 246 186 14
57654-242 186 14 239 182 13 239 182 13 242 186 14
57655-242 186 14 246 186 14 246 190 14 246 190 14
57656-246 190 14 246 190 14 246 190 14 246 190 14
57657-242 186 14 225 175 15 142 122 72 66 66 66
57658- 30 30 30 10 10 10 0 0 0 0 0 0
57659- 0 0 0 0 0 0 0 0 0 0 0 0
57660- 0 0 0 0 0 0 0 0 0 0 0 0
57661- 0 0 0 0 0 0 0 0 0 6 6 6
57662- 26 26 26 70 70 70 163 133 67 210 150 10
57663-236 178 12 246 190 14 246 190 14 246 190 14
57664-246 190 14 246 190 14 246 190 14 246 190 14
57665-246 190 14 246 190 14 246 190 14 246 190 14
57666-246 190 14 246 190 14 246 190 14 246 190 14
57667-232 195 16 121 92 8 34 34 34 106 106 106
57668-221 221 221 253 253 253 253 253 253 253 253 253
57669-253 253 253 253 253 253 253 253 253 253 253 253
57670-253 253 253 253 253 253 253 253 253 253 253 253
57671-253 253 253 253 253 253 253 253 253 253 253 253
57672-242 242 242 82 82 82 18 14 6 163 110 8
57673-216 158 10 236 178 12 242 186 14 246 190 14
57674-246 190 14 246 190 14 246 190 14 246 190 14
57675-246 190 14 246 190 14 246 190 14 246 190 14
57676-246 190 14 246 190 14 246 190 14 246 190 14
57677-246 190 14 246 190 14 242 186 14 163 133 67
57678- 46 46 46 18 18 18 6 6 6 0 0 0
57679- 0 0 0 0 0 0 0 0 0 0 0 0
57680- 0 0 0 0 0 0 0 0 0 0 0 0
57681- 0 0 0 0 0 0 0 0 0 10 10 10
57682- 30 30 30 78 78 78 163 133 67 210 150 10
57683-236 178 12 246 186 14 246 190 14 246 190 14
57684-246 190 14 246 190 14 246 190 14 246 190 14
57685-246 190 14 246 190 14 246 190 14 246 190 14
57686-246 190 14 246 190 14 246 190 14 246 190 14
57687-241 196 14 215 174 15 190 178 144 253 253 253
57688-253 253 253 253 253 253 253 253 253 253 253 253
57689-253 253 253 253 253 253 253 253 253 253 253 253
57690-253 253 253 253 253 253 253 253 253 253 253 253
57691-253 253 253 253 253 253 253 253 253 218 218 218
57692- 58 58 58 2 2 6 22 18 6 167 114 7
57693-216 158 10 236 178 12 246 186 14 246 190 14
57694-246 190 14 246 190 14 246 190 14 246 190 14
57695-246 190 14 246 190 14 246 190 14 246 190 14
57696-246 190 14 246 190 14 246 190 14 246 190 14
57697-246 190 14 246 186 14 242 186 14 190 150 46
57698- 54 54 54 22 22 22 6 6 6 0 0 0
57699- 0 0 0 0 0 0 0 0 0 0 0 0
57700- 0 0 0 0 0 0 0 0 0 0 0 0
57701- 0 0 0 0 0 0 0 0 0 14 14 14
57702- 38 38 38 86 86 86 180 133 36 213 154 11
57703-236 178 12 246 186 14 246 190 14 246 190 14
57704-246 190 14 246 190 14 246 190 14 246 190 14
57705-246 190 14 246 190 14 246 190 14 246 190 14
57706-246 190 14 246 190 14 246 190 14 246 190 14
57707-246 190 14 232 195 16 190 146 13 214 214 214
57708-253 253 253 253 253 253 253 253 253 253 253 253
57709-253 253 253 253 253 253 253 253 253 253 253 253
57710-253 253 253 253 253 253 253 253 253 253 253 253
57711-253 253 253 250 250 250 170 170 170 26 26 26
57712- 2 2 6 2 2 6 37 26 9 163 110 8
57713-219 162 10 239 182 13 246 186 14 246 190 14
57714-246 190 14 246 190 14 246 190 14 246 190 14
57715-246 190 14 246 190 14 246 190 14 246 190 14
57716-246 190 14 246 190 14 246 190 14 246 190 14
57717-246 186 14 236 178 12 224 166 10 142 122 72
57718- 46 46 46 18 18 18 6 6 6 0 0 0
57719- 0 0 0 0 0 0 0 0 0 0 0 0
57720- 0 0 0 0 0 0 0 0 0 0 0 0
57721- 0 0 0 0 0 0 6 6 6 18 18 18
57722- 50 50 50 109 106 95 192 133 9 224 166 10
57723-242 186 14 246 190 14 246 190 14 246 190 14
57724-246 190 14 246 190 14 246 190 14 246 190 14
57725-246 190 14 246 190 14 246 190 14 246 190 14
57726-246 190 14 246 190 14 246 190 14 246 190 14
57727-242 186 14 226 184 13 210 162 10 142 110 46
57728-226 226 226 253 253 253 253 253 253 253 253 253
57729-253 253 253 253 253 253 253 253 253 253 253 253
57730-253 253 253 253 253 253 253 253 253 253 253 253
57731-198 198 198 66 66 66 2 2 6 2 2 6
57732- 2 2 6 2 2 6 50 34 6 156 107 11
57733-219 162 10 239 182 13 246 186 14 246 190 14
57734-246 190 14 246 190 14 246 190 14 246 190 14
57735-246 190 14 246 190 14 246 190 14 246 190 14
57736-246 190 14 246 190 14 246 190 14 242 186 14
57737-234 174 13 213 154 11 154 122 46 66 66 66
57738- 30 30 30 10 10 10 0 0 0 0 0 0
57739- 0 0 0 0 0 0 0 0 0 0 0 0
57740- 0 0 0 0 0 0 0 0 0 0 0 0
57741- 0 0 0 0 0 0 6 6 6 22 22 22
57742- 58 58 58 154 121 60 206 145 10 234 174 13
57743-242 186 14 246 186 14 246 190 14 246 190 14
57744-246 190 14 246 190 14 246 190 14 246 190 14
57745-246 190 14 246 190 14 246 190 14 246 190 14
57746-246 190 14 246 190 14 246 190 14 246 190 14
57747-246 186 14 236 178 12 210 162 10 163 110 8
57748- 61 42 6 138 138 138 218 218 218 250 250 250
57749-253 253 253 253 253 253 253 253 253 250 250 250
57750-242 242 242 210 210 210 144 144 144 66 66 66
57751- 6 6 6 2 2 6 2 2 6 2 2 6
57752- 2 2 6 2 2 6 61 42 6 163 110 8
57753-216 158 10 236 178 12 246 190 14 246 190 14
57754-246 190 14 246 190 14 246 190 14 246 190 14
57755-246 190 14 246 190 14 246 190 14 246 190 14
57756-246 190 14 239 182 13 230 174 11 216 158 10
57757-190 142 34 124 112 88 70 70 70 38 38 38
57758- 18 18 18 6 6 6 0 0 0 0 0 0
57759- 0 0 0 0 0 0 0 0 0 0 0 0
57760- 0 0 0 0 0 0 0 0 0 0 0 0
57761- 0 0 0 0 0 0 6 6 6 22 22 22
57762- 62 62 62 168 124 44 206 145 10 224 166 10
57763-236 178 12 239 182 13 242 186 14 242 186 14
57764-246 186 14 246 190 14 246 190 14 246 190 14
57765-246 190 14 246 190 14 246 190 14 246 190 14
57766-246 190 14 246 190 14 246 190 14 246 190 14
57767-246 190 14 236 178 12 216 158 10 175 118 6
57768- 80 54 7 2 2 6 6 6 6 30 30 30
57769- 54 54 54 62 62 62 50 50 50 38 38 38
57770- 14 14 14 2 2 6 2 2 6 2 2 6
57771- 2 2 6 2 2 6 2 2 6 2 2 6
57772- 2 2 6 6 6 6 80 54 7 167 114 7
57773-213 154 11 236 178 12 246 190 14 246 190 14
57774-246 190 14 246 190 14 246 190 14 246 190 14
57775-246 190 14 242 186 14 239 182 13 239 182 13
57776-230 174 11 210 150 10 174 135 50 124 112 88
57777- 82 82 82 54 54 54 34 34 34 18 18 18
57778- 6 6 6 0 0 0 0 0 0 0 0 0
57779- 0 0 0 0 0 0 0 0 0 0 0 0
57780- 0 0 0 0 0 0 0 0 0 0 0 0
57781- 0 0 0 0 0 0 6 6 6 18 18 18
57782- 50 50 50 158 118 36 192 133 9 200 144 11
57783-216 158 10 219 162 10 224 166 10 226 170 11
57784-230 174 11 236 178 12 239 182 13 239 182 13
57785-242 186 14 246 186 14 246 190 14 246 190 14
57786-246 190 14 246 190 14 246 190 14 246 190 14
57787-246 186 14 230 174 11 210 150 10 163 110 8
57788-104 69 6 10 10 10 2 2 6 2 2 6
57789- 2 2 6 2 2 6 2 2 6 2 2 6
57790- 2 2 6 2 2 6 2 2 6 2 2 6
57791- 2 2 6 2 2 6 2 2 6 2 2 6
57792- 2 2 6 6 6 6 91 60 6 167 114 7
57793-206 145 10 230 174 11 242 186 14 246 190 14
57794-246 190 14 246 190 14 246 186 14 242 186 14
57795-239 182 13 230 174 11 224 166 10 213 154 11
57796-180 133 36 124 112 88 86 86 86 58 58 58
57797- 38 38 38 22 22 22 10 10 10 6 6 6
57798- 0 0 0 0 0 0 0 0 0 0 0 0
57799- 0 0 0 0 0 0 0 0 0 0 0 0
57800- 0 0 0 0 0 0 0 0 0 0 0 0
57801- 0 0 0 0 0 0 0 0 0 14 14 14
57802- 34 34 34 70 70 70 138 110 50 158 118 36
57803-167 114 7 180 123 7 192 133 9 197 138 11
57804-200 144 11 206 145 10 213 154 11 219 162 10
57805-224 166 10 230 174 11 239 182 13 242 186 14
57806-246 186 14 246 186 14 246 186 14 246 186 14
57807-239 182 13 216 158 10 185 133 11 152 99 6
57808-104 69 6 18 14 6 2 2 6 2 2 6
57809- 2 2 6 2 2 6 2 2 6 2 2 6
57810- 2 2 6 2 2 6 2 2 6 2 2 6
57811- 2 2 6 2 2 6 2 2 6 2 2 6
57812- 2 2 6 6 6 6 80 54 7 152 99 6
57813-192 133 9 219 162 10 236 178 12 239 182 13
57814-246 186 14 242 186 14 239 182 13 236 178 12
57815-224 166 10 206 145 10 192 133 9 154 121 60
57816- 94 94 94 62 62 62 42 42 42 22 22 22
57817- 14 14 14 6 6 6 0 0 0 0 0 0
57818- 0 0 0 0 0 0 0 0 0 0 0 0
57819- 0 0 0 0 0 0 0 0 0 0 0 0
57820- 0 0 0 0 0 0 0 0 0 0 0 0
57821- 0 0 0 0 0 0 0 0 0 6 6 6
57822- 18 18 18 34 34 34 58 58 58 78 78 78
57823-101 98 89 124 112 88 142 110 46 156 107 11
57824-163 110 8 167 114 7 175 118 6 180 123 7
57825-185 133 11 197 138 11 210 150 10 219 162 10
57826-226 170 11 236 178 12 236 178 12 234 174 13
57827-219 162 10 197 138 11 163 110 8 130 83 6
57828- 91 60 6 10 10 10 2 2 6 2 2 6
57829- 18 18 18 38 38 38 38 38 38 38 38 38
57830- 38 38 38 38 38 38 38 38 38 38 38 38
57831- 38 38 38 38 38 38 26 26 26 2 2 6
57832- 2 2 6 6 6 6 70 47 6 137 92 6
57833-175 118 6 200 144 11 219 162 10 230 174 11
57834-234 174 13 230 174 11 219 162 10 210 150 10
57835-192 133 9 163 110 8 124 112 88 82 82 82
57836- 50 50 50 30 30 30 14 14 14 6 6 6
57837- 0 0 0 0 0 0 0 0 0 0 0 0
57838- 0 0 0 0 0 0 0 0 0 0 0 0
57839- 0 0 0 0 0 0 0 0 0 0 0 0
57840- 0 0 0 0 0 0 0 0 0 0 0 0
57841- 0 0 0 0 0 0 0 0 0 0 0 0
57842- 6 6 6 14 14 14 22 22 22 34 34 34
57843- 42 42 42 58 58 58 74 74 74 86 86 86
57844-101 98 89 122 102 70 130 98 46 121 87 25
57845-137 92 6 152 99 6 163 110 8 180 123 7
57846-185 133 11 197 138 11 206 145 10 200 144 11
57847-180 123 7 156 107 11 130 83 6 104 69 6
57848- 50 34 6 54 54 54 110 110 110 101 98 89
57849- 86 86 86 82 82 82 78 78 78 78 78 78
57850- 78 78 78 78 78 78 78 78 78 78 78 78
57851- 78 78 78 82 82 82 86 86 86 94 94 94
57852-106 106 106 101 101 101 86 66 34 124 80 6
57853-156 107 11 180 123 7 192 133 9 200 144 11
57854-206 145 10 200 144 11 192 133 9 175 118 6
57855-139 102 15 109 106 95 70 70 70 42 42 42
57856- 22 22 22 10 10 10 0 0 0 0 0 0
57857- 0 0 0 0 0 0 0 0 0 0 0 0
57858- 0 0 0 0 0 0 0 0 0 0 0 0
57859- 0 0 0 0 0 0 0 0 0 0 0 0
57860- 0 0 0 0 0 0 0 0 0 0 0 0
57861- 0 0 0 0 0 0 0 0 0 0 0 0
57862- 0 0 0 0 0 0 6 6 6 10 10 10
57863- 14 14 14 22 22 22 30 30 30 38 38 38
57864- 50 50 50 62 62 62 74 74 74 90 90 90
57865-101 98 89 112 100 78 121 87 25 124 80 6
57866-137 92 6 152 99 6 152 99 6 152 99 6
57867-138 86 6 124 80 6 98 70 6 86 66 30
57868-101 98 89 82 82 82 58 58 58 46 46 46
57869- 38 38 38 34 34 34 34 34 34 34 34 34
57870- 34 34 34 34 34 34 34 34 34 34 34 34
57871- 34 34 34 34 34 34 38 38 38 42 42 42
57872- 54 54 54 82 82 82 94 86 76 91 60 6
57873-134 86 6 156 107 11 167 114 7 175 118 6
57874-175 118 6 167 114 7 152 99 6 121 87 25
57875-101 98 89 62 62 62 34 34 34 18 18 18
57876- 6 6 6 0 0 0 0 0 0 0 0 0
57877- 0 0 0 0 0 0 0 0 0 0 0 0
57878- 0 0 0 0 0 0 0 0 0 0 0 0
57879- 0 0 0 0 0 0 0 0 0 0 0 0
57880- 0 0 0 0 0 0 0 0 0 0 0 0
57881- 0 0 0 0 0 0 0 0 0 0 0 0
57882- 0 0 0 0 0 0 0 0 0 0 0 0
57883- 0 0 0 6 6 6 6 6 6 10 10 10
57884- 18 18 18 22 22 22 30 30 30 42 42 42
57885- 50 50 50 66 66 66 86 86 86 101 98 89
57886-106 86 58 98 70 6 104 69 6 104 69 6
57887-104 69 6 91 60 6 82 62 34 90 90 90
57888- 62 62 62 38 38 38 22 22 22 14 14 14
57889- 10 10 10 10 10 10 10 10 10 10 10 10
57890- 10 10 10 10 10 10 6 6 6 10 10 10
57891- 10 10 10 10 10 10 10 10 10 14 14 14
57892- 22 22 22 42 42 42 70 70 70 89 81 66
57893- 80 54 7 104 69 6 124 80 6 137 92 6
57894-134 86 6 116 81 8 100 82 52 86 86 86
57895- 58 58 58 30 30 30 14 14 14 6 6 6
57896- 0 0 0 0 0 0 0 0 0 0 0 0
57897- 0 0 0 0 0 0 0 0 0 0 0 0
57898- 0 0 0 0 0 0 0 0 0 0 0 0
57899- 0 0 0 0 0 0 0 0 0 0 0 0
57900- 0 0 0 0 0 0 0 0 0 0 0 0
57901- 0 0 0 0 0 0 0 0 0 0 0 0
57902- 0 0 0 0 0 0 0 0 0 0 0 0
57903- 0 0 0 0 0 0 0 0 0 0 0 0
57904- 0 0 0 6 6 6 10 10 10 14 14 14
57905- 18 18 18 26 26 26 38 38 38 54 54 54
57906- 70 70 70 86 86 86 94 86 76 89 81 66
57907- 89 81 66 86 86 86 74 74 74 50 50 50
57908- 30 30 30 14 14 14 6 6 6 0 0 0
57909- 0 0 0 0 0 0 0 0 0 0 0 0
57910- 0 0 0 0 0 0 0 0 0 0 0 0
57911- 0 0 0 0 0 0 0 0 0 0 0 0
57912- 6 6 6 18 18 18 34 34 34 58 58 58
57913- 82 82 82 89 81 66 89 81 66 89 81 66
57914- 94 86 66 94 86 76 74 74 74 50 50 50
57915- 26 26 26 14 14 14 6 6 6 0 0 0
57916- 0 0 0 0 0 0 0 0 0 0 0 0
57917- 0 0 0 0 0 0 0 0 0 0 0 0
57918- 0 0 0 0 0 0 0 0 0 0 0 0
57919- 0 0 0 0 0 0 0 0 0 0 0 0
57920- 0 0 0 0 0 0 0 0 0 0 0 0
57921- 0 0 0 0 0 0 0 0 0 0 0 0
57922- 0 0 0 0 0 0 0 0 0 0 0 0
57923- 0 0 0 0 0 0 0 0 0 0 0 0
57924- 0 0 0 0 0 0 0 0 0 0 0 0
57925- 6 6 6 6 6 6 14 14 14 18 18 18
57926- 30 30 30 38 38 38 46 46 46 54 54 54
57927- 50 50 50 42 42 42 30 30 30 18 18 18
57928- 10 10 10 0 0 0 0 0 0 0 0 0
57929- 0 0 0 0 0 0 0 0 0 0 0 0
57930- 0 0 0 0 0 0 0 0 0 0 0 0
57931- 0 0 0 0 0 0 0 0 0 0 0 0
57932- 0 0 0 6 6 6 14 14 14 26 26 26
57933- 38 38 38 50 50 50 58 58 58 58 58 58
57934- 54 54 54 42 42 42 30 30 30 18 18 18
57935- 10 10 10 0 0 0 0 0 0 0 0 0
57936- 0 0 0 0 0 0 0 0 0 0 0 0
57937- 0 0 0 0 0 0 0 0 0 0 0 0
57938- 0 0 0 0 0 0 0 0 0 0 0 0
57939- 0 0 0 0 0 0 0 0 0 0 0 0
57940- 0 0 0 0 0 0 0 0 0 0 0 0
57941- 0 0 0 0 0 0 0 0 0 0 0 0
57942- 0 0 0 0 0 0 0 0 0 0 0 0
57943- 0 0 0 0 0 0 0 0 0 0 0 0
57944- 0 0 0 0 0 0 0 0 0 0 0 0
57945- 0 0 0 0 0 0 0 0 0 6 6 6
57946- 6 6 6 10 10 10 14 14 14 18 18 18
57947- 18 18 18 14 14 14 10 10 10 6 6 6
57948- 0 0 0 0 0 0 0 0 0 0 0 0
57949- 0 0 0 0 0 0 0 0 0 0 0 0
57950- 0 0 0 0 0 0 0 0 0 0 0 0
57951- 0 0 0 0 0 0 0 0 0 0 0 0
57952- 0 0 0 0 0 0 0 0 0 6 6 6
57953- 14 14 14 18 18 18 22 22 22 22 22 22
57954- 18 18 18 14 14 14 10 10 10 6 6 6
57955- 0 0 0 0 0 0 0 0 0 0 0 0
57956- 0 0 0 0 0 0 0 0 0 0 0 0
57957- 0 0 0 0 0 0 0 0 0 0 0 0
57958- 0 0 0 0 0 0 0 0 0 0 0 0
57959- 0 0 0 0 0 0 0 0 0 0 0 0
57960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57973+4 4 4 4 4 4
57974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57987+4 4 4 4 4 4
57988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58001+4 4 4 4 4 4
58002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58015+4 4 4 4 4 4
58016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58029+4 4 4 4 4 4
58030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58043+4 4 4 4 4 4
58044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58048+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58049+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58053+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58054+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58055+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58057+4 4 4 4 4 4
58058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58062+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58063+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58064+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58067+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58068+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58069+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58070+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58071+4 4 4 4 4 4
58072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58076+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58077+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58078+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58081+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58082+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58083+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58084+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58085+4 4 4 4 4 4
58086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58089+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58090+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58091+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58092+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58094+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58095+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58096+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58097+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58098+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58099+4 4 4 4 4 4
58100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58103+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58104+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58105+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58106+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58107+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58108+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58109+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58110+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58111+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58112+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58113+4 4 4 4 4 4
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58117+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58118+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58119+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58120+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58121+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58122+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58123+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58124+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58125+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58126+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58127+4 4 4 4 4 4
58128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58130+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58131+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58132+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58133+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58134+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58135+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58136+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58137+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58138+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58139+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58140+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58141+4 4 4 4 4 4
58142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58144+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58145+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58146+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58147+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58148+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58149+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58150+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58151+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58152+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58153+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58154+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58155+4 4 4 4 4 4
58156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58158+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58159+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58160+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58161+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58162+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58163+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58164+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58165+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58166+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58167+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58168+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58169+4 4 4 4 4 4
58170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58172+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58173+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58174+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58175+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58176+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58177+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58178+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58179+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58180+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58181+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58182+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58183+4 4 4 4 4 4
58184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58185+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58186+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58187+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58188+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58189+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58190+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58191+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58192+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58193+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58194+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58195+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58196+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58197+4 4 4 4 4 4
58198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58199+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58200+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58201+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58202+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58203+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58204+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58205+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58206+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58207+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58208+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58209+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58210+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58211+0 0 0 4 4 4
58212+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58213+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58214+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58215+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58216+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58217+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58218+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58219+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58220+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58221+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58222+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58223+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58224+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58225+2 0 0 0 0 0
58226+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58227+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58228+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58229+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58230+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58231+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58232+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58233+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58234+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58235+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58236+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58237+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58238+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58239+37 38 37 0 0 0
58240+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58241+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58242+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58243+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58244+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58245+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58246+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58247+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58248+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58249+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58250+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58251+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58252+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58253+85 115 134 4 0 0
58254+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58255+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58256+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58257+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58258+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58259+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58260+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58261+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58262+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58263+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58264+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58265+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58266+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58267+60 73 81 4 0 0
58268+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58269+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58270+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58271+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58272+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58273+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58274+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58275+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58276+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58277+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58278+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58279+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58280+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58281+16 19 21 4 0 0
58282+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58283+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58284+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58285+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58286+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58287+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58288+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58289+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58290+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58291+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58292+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58293+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58294+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58295+4 0 0 4 3 3
58296+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58297+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58298+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58300+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58301+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58302+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58303+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58304+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58305+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58306+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58307+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58308+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58309+3 2 2 4 4 4
58310+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58311+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58312+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58313+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58314+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58315+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58316+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58317+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58318+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58319+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58320+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58321+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58322+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58323+4 4 4 4 4 4
58324+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58325+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58326+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58327+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58328+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58329+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58330+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58331+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58332+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58333+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58334+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58335+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58336+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58337+4 4 4 4 4 4
58338+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58339+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58340+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58341+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58342+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58343+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58344+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58345+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58346+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58347+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58348+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58349+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58350+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58351+5 5 5 5 5 5
58352+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58353+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58354+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58355+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58356+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58357+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58358+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58359+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58360+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58361+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58362+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58363+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58364+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58365+5 5 5 4 4 4
58366+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58367+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58368+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58369+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58370+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58371+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58372+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58373+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58374+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58375+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58376+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58377+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58379+4 4 4 4 4 4
58380+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58381+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58382+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58383+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58384+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58385+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58386+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58387+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58388+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58389+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58390+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58391+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58393+4 4 4 4 4 4
58394+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58395+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58396+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58397+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58398+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58399+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58400+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58401+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58402+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58403+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58404+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58407+4 4 4 4 4 4
58408+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58409+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58410+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58411+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58412+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58413+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58414+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58415+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58416+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58417+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58418+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58421+4 4 4 4 4 4
58422+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58423+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58424+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58425+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58426+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58427+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58428+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58429+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58430+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58431+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58432+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58435+4 4 4 4 4 4
58436+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58437+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58438+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58439+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58440+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58441+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58442+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58443+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58444+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58445+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58446+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58449+4 4 4 4 4 4
58450+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58451+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58452+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58453+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58454+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58455+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58456+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58457+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58458+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58459+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58460+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58463+4 4 4 4 4 4
58464+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58465+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58466+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58467+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58468+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58469+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58470+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58471+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58472+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58473+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58474+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58477+4 4 4 4 4 4
58478+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58479+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58480+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58481+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58482+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58483+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58484+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58485+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58486+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58487+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58488+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58491+4 4 4 4 4 4
58492+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58493+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58494+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58495+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58496+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58497+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58498+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58499+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58500+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58501+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58502+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58505+4 4 4 4 4 4
58506+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58507+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58508+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58509+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58510+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58511+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58512+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58513+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58514+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58515+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58516+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58519+4 4 4 4 4 4
58520+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58521+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58522+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58523+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58524+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58525+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58526+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58527+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58528+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58529+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58530+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58533+4 4 4 4 4 4
58534+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58535+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58536+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58537+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58538+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58539+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58540+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58541+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58542+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58543+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58544+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58547+4 4 4 4 4 4
58548+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58549+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58550+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58551+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58552+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58553+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58554+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58555+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58556+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58557+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58558+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58561+4 4 4 4 4 4
58562+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58563+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58564+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58565+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58566+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58567+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58568+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58569+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58570+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58571+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58572+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58575+4 4 4 4 4 4
58576+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58577+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58578+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58579+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58580+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58581+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58582+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58583+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58584+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58585+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58586+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58589+4 4 4 4 4 4
58590+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58591+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58592+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58593+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58594+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58595+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58596+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58597+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58598+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58599+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58600+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58603+4 4 4 4 4 4
58604+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58605+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58606+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58607+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58608+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58609+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58610+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58611+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58612+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58613+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58614+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58617+4 4 4 4 4 4
58618+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58619+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
58620+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58621+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
58622+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
58623+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
58624+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
58625+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
58626+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58627+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58628+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58631+4 4 4 4 4 4
58632+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58633+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
58634+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
58635+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
58636+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
58637+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
58638+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58639+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
58640+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58641+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58642+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58645+4 4 4 4 4 4
58646+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58647+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
58648+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
58649+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58650+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
58651+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
58652+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58653+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
58654+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58655+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58656+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58659+4 4 4 4 4 4
58660+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58661+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
58662+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
58663+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
58664+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
58665+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
58666+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
58667+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
58668+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
58669+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58670+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58673+4 4 4 4 4 4
58674+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58675+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
58676+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
58677+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
58678+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
58679+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
58680+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
58681+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
58682+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
58683+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58684+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58687+4 4 4 4 4 4
58688+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
58689+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
58690+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
58691+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
58692+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58693+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
58694+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
58695+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
58696+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
58697+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58698+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58701+4 4 4 4 4 4
58702+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58703+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
58704+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
58705+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
58706+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
58707+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
58708+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
58709+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
58710+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
58711+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58712+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58715+4 4 4 4 4 4
58716+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
58717+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
58718+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
58719+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
58720+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
58721+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
58722+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
58723+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
58724+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
58725+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
58726+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58729+4 4 4 4 4 4
58730+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58731+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58732+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58733+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58734+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58735+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58736+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58737+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58738+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58739+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58740+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58743+4 4 4 4 4 4
58744+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58745+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58746+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58747+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58748+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58749+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58750+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58751+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58752+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58753+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58754+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58757+4 4 4 4 4 4
58758+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58759+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58760+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58761+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58762+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58763+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58764+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58765+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58766+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58767+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58768+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58771+4 4 4 4 4 4
58772+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58773+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58774+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58775+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58776+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58777+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58778+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58779+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58780+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58781+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58785+4 4 4 4 4 4
58786+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58787+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58788+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58789+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58790+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58791+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58792+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58793+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58794+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58795+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58799+4 4 4 4 4 4
58800+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58801+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58802+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58803+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58804+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58805+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58806+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58807+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58808+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58809+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58813+4 4 4 4 4 4
58814+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58815+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58816+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58817+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58818+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58819+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58820+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58821+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58822+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58823+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58827+4 4 4 4 4 4
58828+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58829+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58830+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58831+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58832+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58833+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58834+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58835+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58836+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58841+4 4 4 4 4 4
58842+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58843+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58844+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58845+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58846+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58847+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58848+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58849+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58850+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58855+4 4 4 4 4 4
58856+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58857+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58858+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58859+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58860+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58861+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58862+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58863+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58864+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58869+4 4 4 4 4 4
58870+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
58871+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
58872+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58873+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
58874+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
58875+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
58876+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
58877+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
58878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58883+4 4 4 4 4 4
58884+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58885+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
58886+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
58887+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
58888+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
58889+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
58890+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
58891+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58897+4 4 4 4 4 4
58898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58899+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
58900+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58901+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
58902+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
58903+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
58904+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
58905+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
58906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58911+4 4 4 4 4 4
58912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58913+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
58914+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
58915+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
58916+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
58917+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
58918+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
58919+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
58920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58925+4 4 4 4 4 4
58926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58927+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58928+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
58929+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58930+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
58931+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
58932+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
58933+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58939+4 4 4 4 4 4
58940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58942+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58943+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
58944+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
58945+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
58946+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
58947+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58953+4 4 4 4 4 4
58954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58957+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58958+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
58959+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
58960+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
58961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58967+4 4 4 4 4 4
58968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58971+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58972+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58973+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58974+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58981+4 4 4 4 4 4
58982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58985+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58986+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58987+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58988+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58995+4 4 4 4 4 4
58996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58999+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59000+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59001+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59002+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59009+4 4 4 4 4 4
59010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59014+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59015+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59016+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59023+4 4 4 4 4 4
59024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59028+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59029+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59030+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59037+4 4 4 4 4 4
59038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59042+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59043+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59044+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59051+4 4 4 4 4 4
59052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59056+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59057+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59065+4 4 4 4 4 4
59066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59070+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59071+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59079+4 4 4 4 4 4
59080diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59081index fef20db..d28b1ab 100644
59082--- a/drivers/xen/xenfs/xenstored.c
59083+++ b/drivers/xen/xenfs/xenstored.c
59084@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59085 static int xsd_kva_open(struct inode *inode, struct file *file)
59086 {
59087 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59088+#ifdef CONFIG_GRKERNSEC_HIDESYM
59089+ NULL);
59090+#else
59091 xen_store_interface);
59092+#endif
59093+
59094 if (!file->private_data)
59095 return -ENOMEM;
59096 return 0;
59097diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
59098index cc1cfae..41158ad 100644
59099--- a/fs/9p/vfs_addr.c
59100+++ b/fs/9p/vfs_addr.c
59101@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
59102
59103 retval = v9fs_file_write_internal(inode,
59104 v9inode->writeback_fid,
59105- (__force const char __user *)buffer,
59106+ (const char __force_user *)buffer,
59107 len, &offset, 0);
59108 if (retval > 0)
59109 retval = 0;
59110diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
59111index 7fa4f7a..a7ebf8c 100644
59112--- a/fs/9p/vfs_inode.c
59113+++ b/fs/9p/vfs_inode.c
59114@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
59115 void
59116 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
59117 {
59118- char *s = nd_get_link(nd);
59119+ const char *s = nd_get_link(nd);
59120
59121 p9_debug(P9_DEBUG_VFS, " %s %s\n",
59122 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
59123diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
59124index 370b24c..ff0be7b 100644
59125--- a/fs/Kconfig.binfmt
59126+++ b/fs/Kconfig.binfmt
59127@@ -103,7 +103,7 @@ config HAVE_AOUT
59128
59129 config BINFMT_AOUT
59130 tristate "Kernel support for a.out and ECOFF binaries"
59131- depends on HAVE_AOUT
59132+ depends on HAVE_AOUT && BROKEN
59133 ---help---
59134 A.out (Assembler.OUTput) is a set of formats for libraries and
59135 executables used in the earliest versions of UNIX. Linux used
59136diff --git a/fs/afs/inode.c b/fs/afs/inode.c
59137index 2946712..f737435 100644
59138--- a/fs/afs/inode.c
59139+++ b/fs/afs/inode.c
59140@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59141 struct afs_vnode *vnode;
59142 struct super_block *sb;
59143 struct inode *inode;
59144- static atomic_t afs_autocell_ino;
59145+ static atomic_unchecked_t afs_autocell_ino;
59146
59147 _enter("{%x:%u},%*.*s,",
59148 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
59149@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
59150 data.fid.unique = 0;
59151 data.fid.vnode = 0;
59152
59153- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
59154+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
59155 afs_iget5_autocell_test, afs_iget5_set,
59156 &data);
59157 if (!inode) {
59158diff --git a/fs/aio.c b/fs/aio.c
59159index 7337500..2058af6 100644
59160--- a/fs/aio.c
59161+++ b/fs/aio.c
59162@@ -380,7 +380,7 @@ static int aio_setup_ring(struct kioctx *ctx)
59163 size += sizeof(struct io_event) * nr_events;
59164
59165 nr_pages = PFN_UP(size);
59166- if (nr_pages < 0)
59167+ if (nr_pages <= 0)
59168 return -EINVAL;
59169
59170 file = aio_private_file(ctx, nr_pages);
59171diff --git a/fs/attr.c b/fs/attr.c
59172index 6530ced..4a827e2 100644
59173--- a/fs/attr.c
59174+++ b/fs/attr.c
59175@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
59176 unsigned long limit;
59177
59178 limit = rlimit(RLIMIT_FSIZE);
59179+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
59180 if (limit != RLIM_INFINITY && offset > limit)
59181 goto out_sig;
59182 if (offset > inode->i_sb->s_maxbytes)
59183diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
59184index 116fd38..c04182da 100644
59185--- a/fs/autofs4/waitq.c
59186+++ b/fs/autofs4/waitq.c
59187@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
59188 {
59189 unsigned long sigpipe, flags;
59190 mm_segment_t fs;
59191- const char *data = (const char *)addr;
59192+ const char __user *data = (const char __force_user *)addr;
59193 ssize_t wr = 0;
59194
59195 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
59196@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
59197 return 1;
59198 }
59199
59200+#ifdef CONFIG_GRKERNSEC_HIDESYM
59201+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
59202+#endif
59203+
59204 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59205 enum autofs_notify notify)
59206 {
59207@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
59208
59209 /* If this is a direct mount request create a dummy name */
59210 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
59211+#ifdef CONFIG_GRKERNSEC_HIDESYM
59212+ /* this name does get written to userland via autofs4_write() */
59213+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
59214+#else
59215 qstr.len = sprintf(name, "%p", dentry);
59216+#endif
59217 else {
59218 qstr.len = autofs4_getpath(sbi, dentry, &name);
59219 if (!qstr.len) {
59220diff --git a/fs/befs/endian.h b/fs/befs/endian.h
59221index 2722387..56059b5 100644
59222--- a/fs/befs/endian.h
59223+++ b/fs/befs/endian.h
59224@@ -11,7 +11,7 @@
59225
59226 #include <asm/byteorder.h>
59227
59228-static inline u64
59229+static inline u64 __intentional_overflow(-1)
59230 fs64_to_cpu(const struct super_block *sb, fs64 n)
59231 {
59232 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59233@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
59234 return (__force fs64)cpu_to_be64(n);
59235 }
59236
59237-static inline u32
59238+static inline u32 __intentional_overflow(-1)
59239 fs32_to_cpu(const struct super_block *sb, fs32 n)
59240 {
59241 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59242@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
59243 return (__force fs32)cpu_to_be32(n);
59244 }
59245
59246-static inline u16
59247+static inline u16 __intentional_overflow(-1)
59248 fs16_to_cpu(const struct super_block *sb, fs16 n)
59249 {
59250 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
59251diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
59252index ca0ba15..0fa3257 100644
59253--- a/fs/binfmt_aout.c
59254+++ b/fs/binfmt_aout.c
59255@@ -16,6 +16,7 @@
59256 #include <linux/string.h>
59257 #include <linux/fs.h>
59258 #include <linux/file.h>
59259+#include <linux/security.h>
59260 #include <linux/stat.h>
59261 #include <linux/fcntl.h>
59262 #include <linux/ptrace.h>
59263@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
59264 #endif
59265 # define START_STACK(u) ((void __user *)u.start_stack)
59266
59267+ memset(&dump, 0, sizeof(dump));
59268+
59269 fs = get_fs();
59270 set_fs(KERNEL_DS);
59271 has_dumped = 1;
59272@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
59273
59274 /* If the size of the dump file exceeds the rlimit, then see what would happen
59275 if we wrote the stack, but not the data area. */
59276+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
59277 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
59278 dump.u_dsize = 0;
59279
59280 /* Make sure we have enough room to write the stack and data areas. */
59281+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
59282 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
59283 dump.u_ssize = 0;
59284
59285@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
59286 rlim = rlimit(RLIMIT_DATA);
59287 if (rlim >= RLIM_INFINITY)
59288 rlim = ~0;
59289+
59290+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
59291 if (ex.a_data + ex.a_bss > rlim)
59292 return -ENOMEM;
59293
59294@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
59295
59296 install_exec_creds(bprm);
59297
59298+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59299+ current->mm->pax_flags = 0UL;
59300+#endif
59301+
59302+#ifdef CONFIG_PAX_PAGEEXEC
59303+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
59304+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
59305+
59306+#ifdef CONFIG_PAX_EMUTRAMP
59307+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
59308+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
59309+#endif
59310+
59311+#ifdef CONFIG_PAX_MPROTECT
59312+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
59313+ current->mm->pax_flags |= MF_PAX_MPROTECT;
59314+#endif
59315+
59316+ }
59317+#endif
59318+
59319 if (N_MAGIC(ex) == OMAGIC) {
59320 unsigned long text_addr, map_size;
59321 loff_t pos;
59322@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
59323 }
59324
59325 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
59326- PROT_READ | PROT_WRITE | PROT_EXEC,
59327+ PROT_READ | PROT_WRITE,
59328 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
59329 fd_offset + ex.a_text);
59330 if (error != N_DATADDR(ex)) {
59331diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
59332index 3892c1a..4e27c04 100644
59333--- a/fs/binfmt_elf.c
59334+++ b/fs/binfmt_elf.c
59335@@ -34,6 +34,7 @@
59336 #include <linux/utsname.h>
59337 #include <linux/coredump.h>
59338 #include <linux/sched.h>
59339+#include <linux/xattr.h>
59340 #include <asm/uaccess.h>
59341 #include <asm/param.h>
59342 #include <asm/page.h>
59343@@ -47,7 +48,7 @@
59344
59345 static int load_elf_binary(struct linux_binprm *bprm);
59346 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
59347- int, int, unsigned long);
59348+ int, int, unsigned long) __intentional_overflow(-1);
59349
59350 #ifdef CONFIG_USELIB
59351 static int load_elf_library(struct file *);
59352@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
59353 #define elf_core_dump NULL
59354 #endif
59355
59356+#ifdef CONFIG_PAX_MPROTECT
59357+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
59358+#endif
59359+
59360+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59361+static void elf_handle_mmap(struct file *file);
59362+#endif
59363+
59364 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
59365 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
59366 #else
59367@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
59368 .load_binary = load_elf_binary,
59369 .load_shlib = load_elf_library,
59370 .core_dump = elf_core_dump,
59371+
59372+#ifdef CONFIG_PAX_MPROTECT
59373+ .handle_mprotect= elf_handle_mprotect,
59374+#endif
59375+
59376+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59377+ .handle_mmap = elf_handle_mmap,
59378+#endif
59379+
59380 .min_coredump = ELF_EXEC_PAGESIZE,
59381 };
59382
59383@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
59384
59385 static int set_brk(unsigned long start, unsigned long end)
59386 {
59387+ unsigned long e = end;
59388+
59389 start = ELF_PAGEALIGN(start);
59390 end = ELF_PAGEALIGN(end);
59391 if (end > start) {
59392@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
59393 if (BAD_ADDR(addr))
59394 return addr;
59395 }
59396- current->mm->start_brk = current->mm->brk = end;
59397+ current->mm->start_brk = current->mm->brk = e;
59398 return 0;
59399 }
59400
59401@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59402 elf_addr_t __user *u_rand_bytes;
59403 const char *k_platform = ELF_PLATFORM;
59404 const char *k_base_platform = ELF_BASE_PLATFORM;
59405- unsigned char k_rand_bytes[16];
59406+ u32 k_rand_bytes[4];
59407 int items;
59408 elf_addr_t *elf_info;
59409 int ei_index = 0;
59410 const struct cred *cred = current_cred();
59411 struct vm_area_struct *vma;
59412+ unsigned long saved_auxv[AT_VECTOR_SIZE];
59413
59414 /*
59415 * In some cases (e.g. Hyper-Threading), we want to avoid L1
59416@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59417 * Generate 16 random bytes for userspace PRNG seeding.
59418 */
59419 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
59420- u_rand_bytes = (elf_addr_t __user *)
59421- STACK_ALLOC(p, sizeof(k_rand_bytes));
59422+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
59423+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
59424+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
59425+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
59426+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
59427+ u_rand_bytes = (elf_addr_t __user *) p;
59428 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
59429 return -EFAULT;
59430
59431@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
59432 return -EFAULT;
59433 current->mm->env_end = p;
59434
59435+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
59436+
59437 /* Put the elf_info on the stack in the right place. */
59438 sp = (elf_addr_t __user *)envp + 1;
59439- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
59440+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
59441 return -EFAULT;
59442 return 0;
59443 }
59444@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
59445 an ELF header */
59446
59447 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59448- struct file *interpreter, unsigned long *interp_map_addr,
59449- unsigned long no_base)
59450+ struct file *interpreter, unsigned long no_base)
59451 {
59452 struct elf_phdr *elf_phdata;
59453 struct elf_phdr *eppnt;
59454- unsigned long load_addr = 0;
59455+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
59456 int load_addr_set = 0;
59457 unsigned long last_bss = 0, elf_bss = 0;
59458- unsigned long error = ~0UL;
59459+ unsigned long error = -EINVAL;
59460 unsigned long total_size;
59461 int retval, i, size;
59462
59463@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59464 goto out_close;
59465 }
59466
59467+#ifdef CONFIG_PAX_SEGMEXEC
59468+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
59469+ pax_task_size = SEGMEXEC_TASK_SIZE;
59470+#endif
59471+
59472 eppnt = elf_phdata;
59473 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
59474 if (eppnt->p_type == PT_LOAD) {
59475@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59476 map_addr = elf_map(interpreter, load_addr + vaddr,
59477 eppnt, elf_prot, elf_type, total_size);
59478 total_size = 0;
59479- if (!*interp_map_addr)
59480- *interp_map_addr = map_addr;
59481 error = map_addr;
59482 if (BAD_ADDR(map_addr))
59483 goto out_close;
59484@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59485 k = load_addr + eppnt->p_vaddr;
59486 if (BAD_ADDR(k) ||
59487 eppnt->p_filesz > eppnt->p_memsz ||
59488- eppnt->p_memsz > TASK_SIZE ||
59489- TASK_SIZE - eppnt->p_memsz < k) {
59490+ eppnt->p_memsz > pax_task_size ||
59491+ pax_task_size - eppnt->p_memsz < k) {
59492 error = -ENOMEM;
59493 goto out_close;
59494 }
59495@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
59496 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
59497
59498 /* Map the last of the bss segment */
59499- error = vm_brk(elf_bss, last_bss - elf_bss);
59500- if (BAD_ADDR(error))
59501- goto out_close;
59502+ if (last_bss > elf_bss) {
59503+ error = vm_brk(elf_bss, last_bss - elf_bss);
59504+ if (BAD_ADDR(error))
59505+ goto out_close;
59506+ }
59507 }
59508
59509 error = load_addr;
59510@@ -543,6 +574,336 @@ out:
59511 return error;
59512 }
59513
59514+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59515+#ifdef CONFIG_PAX_SOFTMODE
59516+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
59517+{
59518+ unsigned long pax_flags = 0UL;
59519+
59520+#ifdef CONFIG_PAX_PAGEEXEC
59521+ if (elf_phdata->p_flags & PF_PAGEEXEC)
59522+ pax_flags |= MF_PAX_PAGEEXEC;
59523+#endif
59524+
59525+#ifdef CONFIG_PAX_SEGMEXEC
59526+ if (elf_phdata->p_flags & PF_SEGMEXEC)
59527+ pax_flags |= MF_PAX_SEGMEXEC;
59528+#endif
59529+
59530+#ifdef CONFIG_PAX_EMUTRAMP
59531+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59532+ pax_flags |= MF_PAX_EMUTRAMP;
59533+#endif
59534+
59535+#ifdef CONFIG_PAX_MPROTECT
59536+ if (elf_phdata->p_flags & PF_MPROTECT)
59537+ pax_flags |= MF_PAX_MPROTECT;
59538+#endif
59539+
59540+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59541+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
59542+ pax_flags |= MF_PAX_RANDMMAP;
59543+#endif
59544+
59545+ return pax_flags;
59546+}
59547+#endif
59548+
59549+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
59550+{
59551+ unsigned long pax_flags = 0UL;
59552+
59553+#ifdef CONFIG_PAX_PAGEEXEC
59554+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
59555+ pax_flags |= MF_PAX_PAGEEXEC;
59556+#endif
59557+
59558+#ifdef CONFIG_PAX_SEGMEXEC
59559+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
59560+ pax_flags |= MF_PAX_SEGMEXEC;
59561+#endif
59562+
59563+#ifdef CONFIG_PAX_EMUTRAMP
59564+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
59565+ pax_flags |= MF_PAX_EMUTRAMP;
59566+#endif
59567+
59568+#ifdef CONFIG_PAX_MPROTECT
59569+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
59570+ pax_flags |= MF_PAX_MPROTECT;
59571+#endif
59572+
59573+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59574+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
59575+ pax_flags |= MF_PAX_RANDMMAP;
59576+#endif
59577+
59578+ return pax_flags;
59579+}
59580+#endif
59581+
59582+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59583+#ifdef CONFIG_PAX_SOFTMODE
59584+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
59585+{
59586+ unsigned long pax_flags = 0UL;
59587+
59588+#ifdef CONFIG_PAX_PAGEEXEC
59589+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
59590+ pax_flags |= MF_PAX_PAGEEXEC;
59591+#endif
59592+
59593+#ifdef CONFIG_PAX_SEGMEXEC
59594+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
59595+ pax_flags |= MF_PAX_SEGMEXEC;
59596+#endif
59597+
59598+#ifdef CONFIG_PAX_EMUTRAMP
59599+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
59600+ pax_flags |= MF_PAX_EMUTRAMP;
59601+#endif
59602+
59603+#ifdef CONFIG_PAX_MPROTECT
59604+ if (pax_flags_softmode & MF_PAX_MPROTECT)
59605+ pax_flags |= MF_PAX_MPROTECT;
59606+#endif
59607+
59608+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59609+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
59610+ pax_flags |= MF_PAX_RANDMMAP;
59611+#endif
59612+
59613+ return pax_flags;
59614+}
59615+#endif
59616+
59617+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
59618+{
59619+ unsigned long pax_flags = 0UL;
59620+
59621+#ifdef CONFIG_PAX_PAGEEXEC
59622+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
59623+ pax_flags |= MF_PAX_PAGEEXEC;
59624+#endif
59625+
59626+#ifdef CONFIG_PAX_SEGMEXEC
59627+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
59628+ pax_flags |= MF_PAX_SEGMEXEC;
59629+#endif
59630+
59631+#ifdef CONFIG_PAX_EMUTRAMP
59632+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
59633+ pax_flags |= MF_PAX_EMUTRAMP;
59634+#endif
59635+
59636+#ifdef CONFIG_PAX_MPROTECT
59637+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
59638+ pax_flags |= MF_PAX_MPROTECT;
59639+#endif
59640+
59641+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
59642+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
59643+ pax_flags |= MF_PAX_RANDMMAP;
59644+#endif
59645+
59646+ return pax_flags;
59647+}
59648+#endif
59649+
59650+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59651+static unsigned long pax_parse_defaults(void)
59652+{
59653+ unsigned long pax_flags = 0UL;
59654+
59655+#ifdef CONFIG_PAX_SOFTMODE
59656+ if (pax_softmode)
59657+ return pax_flags;
59658+#endif
59659+
59660+#ifdef CONFIG_PAX_PAGEEXEC
59661+ pax_flags |= MF_PAX_PAGEEXEC;
59662+#endif
59663+
59664+#ifdef CONFIG_PAX_SEGMEXEC
59665+ pax_flags |= MF_PAX_SEGMEXEC;
59666+#endif
59667+
59668+#ifdef CONFIG_PAX_MPROTECT
59669+ pax_flags |= MF_PAX_MPROTECT;
59670+#endif
59671+
59672+#ifdef CONFIG_PAX_RANDMMAP
59673+ if (randomize_va_space)
59674+ pax_flags |= MF_PAX_RANDMMAP;
59675+#endif
59676+
59677+ return pax_flags;
59678+}
59679+
59680+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
59681+{
59682+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
59683+
59684+#ifdef CONFIG_PAX_EI_PAX
59685+
59686+#ifdef CONFIG_PAX_SOFTMODE
59687+ if (pax_softmode)
59688+ return pax_flags;
59689+#endif
59690+
59691+ pax_flags = 0UL;
59692+
59693+#ifdef CONFIG_PAX_PAGEEXEC
59694+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
59695+ pax_flags |= MF_PAX_PAGEEXEC;
59696+#endif
59697+
59698+#ifdef CONFIG_PAX_SEGMEXEC
59699+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
59700+ pax_flags |= MF_PAX_SEGMEXEC;
59701+#endif
59702+
59703+#ifdef CONFIG_PAX_EMUTRAMP
59704+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
59705+ pax_flags |= MF_PAX_EMUTRAMP;
59706+#endif
59707+
59708+#ifdef CONFIG_PAX_MPROTECT
59709+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
59710+ pax_flags |= MF_PAX_MPROTECT;
59711+#endif
59712+
59713+#ifdef CONFIG_PAX_ASLR
59714+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
59715+ pax_flags |= MF_PAX_RANDMMAP;
59716+#endif
59717+
59718+#endif
59719+
59720+ return pax_flags;
59721+
59722+}
59723+
59724+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
59725+{
59726+
59727+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59728+ unsigned long i;
59729+
59730+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59731+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59732+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59733+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59734+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59735+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59736+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59737+ return PAX_PARSE_FLAGS_FALLBACK;
59738+
59739+#ifdef CONFIG_PAX_SOFTMODE
59740+ if (pax_softmode)
59741+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59742+ else
59743+#endif
59744+
59745+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59746+ break;
59747+ }
59748+#endif
59749+
59750+ return PAX_PARSE_FLAGS_FALLBACK;
59751+}
59752+
59753+static unsigned long pax_parse_xattr_pax(struct file * const file)
59754+{
59755+
59756+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59757+ ssize_t xattr_size, i;
59758+ unsigned char xattr_value[sizeof("pemrs") - 1];
59759+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59760+
59761+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59762+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59763+ return PAX_PARSE_FLAGS_FALLBACK;
59764+
59765+ for (i = 0; i < xattr_size; i++)
59766+ switch (xattr_value[i]) {
59767+ default:
59768+ return PAX_PARSE_FLAGS_FALLBACK;
59769+
59770+#define parse_flag(option1, option2, flag) \
59771+ case option1: \
59772+ if (pax_flags_hardmode & MF_PAX_##flag) \
59773+ return PAX_PARSE_FLAGS_FALLBACK;\
59774+ pax_flags_hardmode |= MF_PAX_##flag; \
59775+ break; \
59776+ case option2: \
59777+ if (pax_flags_softmode & MF_PAX_##flag) \
59778+ return PAX_PARSE_FLAGS_FALLBACK;\
59779+ pax_flags_softmode |= MF_PAX_##flag; \
59780+ break;
59781+
59782+ parse_flag('p', 'P', PAGEEXEC);
59783+ parse_flag('e', 'E', EMUTRAMP);
59784+ parse_flag('m', 'M', MPROTECT);
59785+ parse_flag('r', 'R', RANDMMAP);
59786+ parse_flag('s', 'S', SEGMEXEC);
59787+
59788+#undef parse_flag
59789+ }
59790+
59791+ if (pax_flags_hardmode & pax_flags_softmode)
59792+ return PAX_PARSE_FLAGS_FALLBACK;
59793+
59794+#ifdef CONFIG_PAX_SOFTMODE
59795+ if (pax_softmode)
59796+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59797+ else
59798+#endif
59799+
59800+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59801+#else
59802+ return PAX_PARSE_FLAGS_FALLBACK;
59803+#endif
59804+
59805+}
59806+
59807+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59808+{
59809+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59810+
59811+ pax_flags = pax_parse_defaults();
59812+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59813+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59814+ xattr_pax_flags = pax_parse_xattr_pax(file);
59815+
59816+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59817+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59818+ pt_pax_flags != xattr_pax_flags)
59819+ return -EINVAL;
59820+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59821+ pax_flags = xattr_pax_flags;
59822+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59823+ pax_flags = pt_pax_flags;
59824+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59825+ pax_flags = ei_pax_flags;
59826+
59827+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59828+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59829+ if ((__supported_pte_mask & _PAGE_NX))
59830+ pax_flags &= ~MF_PAX_SEGMEXEC;
59831+ else
59832+ pax_flags &= ~MF_PAX_PAGEEXEC;
59833+ }
59834+#endif
59835+
59836+ if (0 > pax_check_flags(&pax_flags))
59837+ return -EINVAL;
59838+
59839+ current->mm->pax_flags = pax_flags;
59840+ return 0;
59841+}
59842+#endif
59843+
59844 /*
59845 * These are the functions used to load ELF style executables and shared
59846 * libraries. There is no binary dependent code anywhere else.
59847@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59848 {
59849 unsigned int random_variable = 0;
59850
59851+#ifdef CONFIG_PAX_RANDUSTACK
59852+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59853+ return stack_top - current->mm->delta_stack;
59854+#endif
59855+
59856 if ((current->flags & PF_RANDOMIZE) &&
59857 !(current->personality & ADDR_NO_RANDOMIZE)) {
59858 random_variable = get_random_int() & STACK_RND_MASK;
59859@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59860 unsigned long load_addr = 0, load_bias = 0;
59861 int load_addr_set = 0;
59862 char * elf_interpreter = NULL;
59863- unsigned long error;
59864+ unsigned long error = 0;
59865 struct elf_phdr *elf_ppnt, *elf_phdata;
59866 unsigned long elf_bss, elf_brk;
59867 int retval, i;
59868@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59869 struct elfhdr elf_ex;
59870 struct elfhdr interp_elf_ex;
59871 } *loc;
59872+ unsigned long pax_task_size;
59873
59874 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59875 if (!loc) {
59876@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59877 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59878 may depend on the personality. */
59879 SET_PERSONALITY(loc->elf_ex);
59880+
59881+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59882+ current->mm->pax_flags = 0UL;
59883+#endif
59884+
59885+#ifdef CONFIG_PAX_DLRESOLVE
59886+ current->mm->call_dl_resolve = 0UL;
59887+#endif
59888+
59889+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59890+ current->mm->call_syscall = 0UL;
59891+#endif
59892+
59893+#ifdef CONFIG_PAX_ASLR
59894+ current->mm->delta_mmap = 0UL;
59895+ current->mm->delta_stack = 0UL;
59896+#endif
59897+
59898+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59899+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59900+ send_sig(SIGKILL, current, 0);
59901+ goto out_free_dentry;
59902+ }
59903+#endif
59904+
59905+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59906+ pax_set_initial_flags(bprm);
59907+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59908+ if (pax_set_initial_flags_func)
59909+ (pax_set_initial_flags_func)(bprm);
59910+#endif
59911+
59912+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59913+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59914+ current->mm->context.user_cs_limit = PAGE_SIZE;
59915+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59916+ }
59917+#endif
59918+
59919+#ifdef CONFIG_PAX_SEGMEXEC
59920+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59921+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59922+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59923+ pax_task_size = SEGMEXEC_TASK_SIZE;
59924+ current->mm->def_flags |= VM_NOHUGEPAGE;
59925+ } else
59926+#endif
59927+
59928+ pax_task_size = TASK_SIZE;
59929+
59930+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59931+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59932+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59933+ put_cpu();
59934+ }
59935+#endif
59936+
59937+#ifdef CONFIG_PAX_ASLR
59938+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59939+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59940+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59941+ }
59942+#endif
59943+
59944+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59945+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59946+ executable_stack = EXSTACK_DISABLE_X;
59947+ current->personality &= ~READ_IMPLIES_EXEC;
59948+ } else
59949+#endif
59950+
59951 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59952 current->personality |= READ_IMPLIES_EXEC;
59953
59954@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59955 #else
59956 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59957 #endif
59958+
59959+#ifdef CONFIG_PAX_RANDMMAP
59960+ /* PaX: randomize base address at the default exe base if requested */
59961+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59962+#ifdef CONFIG_SPARC64
59963+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59964+#else
59965+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59966+#endif
59967+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59968+ elf_flags |= MAP_FIXED;
59969+ }
59970+#endif
59971+
59972 }
59973
59974 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59975@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59976 * allowed task size. Note that p_filesz must always be
59977 * <= p_memsz so it is only necessary to check p_memsz.
59978 */
59979- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59980- elf_ppnt->p_memsz > TASK_SIZE ||
59981- TASK_SIZE - elf_ppnt->p_memsz < k) {
59982+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59983+ elf_ppnt->p_memsz > pax_task_size ||
59984+ pax_task_size - elf_ppnt->p_memsz < k) {
59985 /* set_brk can never work. Avoid overflows. */
59986 send_sig(SIGKILL, current, 0);
59987 retval = -EINVAL;
59988@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
59989 goto out_free_dentry;
59990 }
59991 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59992- send_sig(SIGSEGV, current, 0);
59993- retval = -EFAULT; /* Nobody gets to see this, but.. */
59994- goto out_free_dentry;
59995+ /*
59996+ * This bss-zeroing can fail if the ELF
59997+ * file specifies odd protections. So
59998+ * we don't check the return value
59999+ */
60000 }
60001
60002+#ifdef CONFIG_PAX_RANDMMAP
60003+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
60004+ unsigned long start, size, flags;
60005+ vm_flags_t vm_flags;
60006+
60007+ start = ELF_PAGEALIGN(elf_brk);
60008+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
60009+ flags = MAP_FIXED | MAP_PRIVATE;
60010+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
60011+
60012+ down_write(&current->mm->mmap_sem);
60013+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
60014+ retval = -ENOMEM;
60015+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
60016+// if (current->personality & ADDR_NO_RANDOMIZE)
60017+// vm_flags |= VM_READ | VM_MAYREAD;
60018+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
60019+ retval = IS_ERR_VALUE(start) ? start : 0;
60020+ }
60021+ up_write(&current->mm->mmap_sem);
60022+ if (retval == 0)
60023+ retval = set_brk(start + size, start + size + PAGE_SIZE);
60024+ if (retval < 0) {
60025+ send_sig(SIGKILL, current, 0);
60026+ goto out_free_dentry;
60027+ }
60028+ }
60029+#endif
60030+
60031 if (elf_interpreter) {
60032- unsigned long interp_map_addr = 0;
60033-
60034 elf_entry = load_elf_interp(&loc->interp_elf_ex,
60035 interpreter,
60036- &interp_map_addr,
60037 load_bias);
60038 if (!IS_ERR((void *)elf_entry)) {
60039 /*
60040@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
60041 * Decide what to dump of a segment, part, all or none.
60042 */
60043 static unsigned long vma_dump_size(struct vm_area_struct *vma,
60044- unsigned long mm_flags)
60045+ unsigned long mm_flags, long signr)
60046 {
60047 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
60048
60049@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
60050 if (vma->vm_file == NULL)
60051 return 0;
60052
60053- if (FILTER(MAPPED_PRIVATE))
60054+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
60055 goto whole;
60056
60057 /*
60058@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
60059 {
60060 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
60061 int i = 0;
60062- do
60063+ do {
60064 i += 2;
60065- while (auxv[i - 2] != AT_NULL);
60066+ } while (auxv[i - 2] != AT_NULL);
60067 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
60068 }
60069
60070@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
60071 {
60072 mm_segment_t old_fs = get_fs();
60073 set_fs(KERNEL_DS);
60074- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
60075+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
60076 set_fs(old_fs);
60077 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
60078 }
60079@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
60080 }
60081
60082 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
60083- unsigned long mm_flags)
60084+ struct coredump_params *cprm)
60085 {
60086 struct vm_area_struct *vma;
60087 size_t size = 0;
60088
60089 for (vma = first_vma(current, gate_vma); vma != NULL;
60090 vma = next_vma(vma, gate_vma))
60091- size += vma_dump_size(vma, mm_flags);
60092+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60093 return size;
60094 }
60095
60096@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60097
60098 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
60099
60100- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
60101+ offset += elf_core_vma_data_size(gate_vma, cprm);
60102 offset += elf_core_extra_data_size();
60103 e_shoff = offset;
60104
60105@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60106 phdr.p_offset = offset;
60107 phdr.p_vaddr = vma->vm_start;
60108 phdr.p_paddr = 0;
60109- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
60110+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60111 phdr.p_memsz = vma->vm_end - vma->vm_start;
60112 offset += phdr.p_filesz;
60113 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
60114@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
60115 unsigned long addr;
60116 unsigned long end;
60117
60118- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
60119+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
60120
60121 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
60122 struct page *page;
60123@@ -2210,6 +2690,167 @@ out:
60124
60125 #endif /* CONFIG_ELF_CORE */
60126
60127+#ifdef CONFIG_PAX_MPROTECT
60128+/* PaX: non-PIC ELF libraries need relocations on their executable segments
60129+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
60130+ * we'll remove VM_MAYWRITE for good on RELRO segments.
60131+ *
60132+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
60133+ * basis because we want to allow the common case and not the special ones.
60134+ */
60135+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
60136+{
60137+ struct elfhdr elf_h;
60138+ struct elf_phdr elf_p;
60139+ unsigned long i;
60140+ unsigned long oldflags;
60141+ bool is_textrel_rw, is_textrel_rx, is_relro;
60142+
60143+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
60144+ return;
60145+
60146+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
60147+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
60148+
60149+#ifdef CONFIG_PAX_ELFRELOCS
60150+ /* possible TEXTREL */
60151+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
60152+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
60153+#else
60154+ is_textrel_rw = false;
60155+ is_textrel_rx = false;
60156+#endif
60157+
60158+ /* possible RELRO */
60159+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
60160+
60161+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
60162+ return;
60163+
60164+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60165+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60166+
60167+#ifdef CONFIG_PAX_ETEXECRELOCS
60168+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60169+#else
60170+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
60171+#endif
60172+
60173+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
60174+ !elf_check_arch(&elf_h) ||
60175+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60176+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60177+ return;
60178+
60179+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60180+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60181+ return;
60182+ switch (elf_p.p_type) {
60183+ case PT_DYNAMIC:
60184+ if (!is_textrel_rw && !is_textrel_rx)
60185+ continue;
60186+ i = 0UL;
60187+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
60188+ elf_dyn dyn;
60189+
60190+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
60191+ break;
60192+ if (dyn.d_tag == DT_NULL)
60193+ break;
60194+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
60195+ gr_log_textrel(vma);
60196+ if (is_textrel_rw)
60197+ vma->vm_flags |= VM_MAYWRITE;
60198+ else
60199+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
60200+ vma->vm_flags &= ~VM_MAYWRITE;
60201+ break;
60202+ }
60203+ i++;
60204+ }
60205+ is_textrel_rw = false;
60206+ is_textrel_rx = false;
60207+ continue;
60208+
60209+ case PT_GNU_RELRO:
60210+ if (!is_relro)
60211+ continue;
60212+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
60213+ vma->vm_flags &= ~VM_MAYWRITE;
60214+ is_relro = false;
60215+ continue;
60216+
60217+#ifdef CONFIG_PAX_PT_PAX_FLAGS
60218+ case PT_PAX_FLAGS: {
60219+ const char *msg_mprotect = "", *msg_emutramp = "";
60220+ char *buffer_lib, *buffer_exe;
60221+
60222+ if (elf_p.p_flags & PF_NOMPROTECT)
60223+ msg_mprotect = "MPROTECT disabled";
60224+
60225+#ifdef CONFIG_PAX_EMUTRAMP
60226+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
60227+ msg_emutramp = "EMUTRAMP enabled";
60228+#endif
60229+
60230+ if (!msg_mprotect[0] && !msg_emutramp[0])
60231+ continue;
60232+
60233+ if (!printk_ratelimit())
60234+ continue;
60235+
60236+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
60237+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
60238+ if (buffer_lib && buffer_exe) {
60239+ char *path_lib, *path_exe;
60240+
60241+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
60242+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
60243+
60244+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
60245+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
60246+
60247+ }
60248+ free_page((unsigned long)buffer_exe);
60249+ free_page((unsigned long)buffer_lib);
60250+ continue;
60251+ }
60252+#endif
60253+
60254+ }
60255+ }
60256+}
60257+#endif
60258+
60259+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
60260+
60261+extern int grsec_enable_log_rwxmaps;
60262+
60263+static void elf_handle_mmap(struct file *file)
60264+{
60265+ struct elfhdr elf_h;
60266+ struct elf_phdr elf_p;
60267+ unsigned long i;
60268+
60269+ if (!grsec_enable_log_rwxmaps)
60270+ return;
60271+
60272+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
60273+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
60274+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
60275+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
60276+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
60277+ return;
60278+
60279+ for (i = 0UL; i < elf_h.e_phnum; i++) {
60280+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
60281+ return;
60282+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
60283+ gr_log_ptgnustack(file);
60284+ }
60285+}
60286+#endif
60287+
60288 static int __init init_elf_binfmt(void)
60289 {
60290 register_binfmt(&elf_format);
60291diff --git a/fs/block_dev.c b/fs/block_dev.c
60292index 6d72746..536d1db 100644
60293--- a/fs/block_dev.c
60294+++ b/fs/block_dev.c
60295@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
60296 else if (bdev->bd_contains == bdev)
60297 return true; /* is a whole device which isn't held */
60298
60299- else if (whole->bd_holder == bd_may_claim)
60300+ else if (whole->bd_holder == (void *)bd_may_claim)
60301 return true; /* is a partition of a device that is being partitioned */
60302 else if (whole->bd_holder != NULL)
60303 return false; /* is a partition of a held device */
60304diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
60305index 44ee5d2..8b23e53 100644
60306--- a/fs/btrfs/ctree.c
60307+++ b/fs/btrfs/ctree.c
60308@@ -1184,9 +1184,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
60309 free_extent_buffer(buf);
60310 add_root_to_dirty_list(root);
60311 } else {
60312- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
60313- parent_start = parent->start;
60314- else
60315+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
60316+ if (parent)
60317+ parent_start = parent->start;
60318+ else
60319+ parent_start = 0;
60320+ } else
60321 parent_start = 0;
60322
60323 WARN_ON(trans->transid != btrfs_header_generation(parent));
60324diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
60325index a2e90f8..5135e5f 100644
60326--- a/fs/btrfs/delayed-inode.c
60327+++ b/fs/btrfs/delayed-inode.c
60328@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
60329
60330 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
60331 {
60332- int seq = atomic_inc_return(&delayed_root->items_seq);
60333+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
60334 if ((atomic_dec_return(&delayed_root->items) <
60335 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
60336 waitqueue_active(&delayed_root->wait))
60337@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
60338
60339 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
60340 {
60341- int val = atomic_read(&delayed_root->items_seq);
60342+ int val = atomic_read_unchecked(&delayed_root->items_seq);
60343
60344 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
60345 return 1;
60346@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
60347 int seq;
60348 int ret;
60349
60350- seq = atomic_read(&delayed_root->items_seq);
60351+ seq = atomic_read_unchecked(&delayed_root->items_seq);
60352
60353 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
60354 if (ret)
60355diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
60356index f70119f..ab5894d 100644
60357--- a/fs/btrfs/delayed-inode.h
60358+++ b/fs/btrfs/delayed-inode.h
60359@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
60360 */
60361 struct list_head prepare_list;
60362 atomic_t items; /* for delayed items */
60363- atomic_t items_seq; /* for delayed items */
60364+ atomic_unchecked_t items_seq; /* for delayed items */
60365 int nodes; /* for delayed nodes */
60366 wait_queue_head_t wait;
60367 };
60368@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
60369 struct btrfs_delayed_root *delayed_root)
60370 {
60371 atomic_set(&delayed_root->items, 0);
60372- atomic_set(&delayed_root->items_seq, 0);
60373+ atomic_set_unchecked(&delayed_root->items_seq, 0);
60374 delayed_root->nodes = 0;
60375 spin_lock_init(&delayed_root->lock);
60376 init_waitqueue_head(&delayed_root->wait);
60377diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
60378index b765d41..5a8b0c3 100644
60379--- a/fs/btrfs/ioctl.c
60380+++ b/fs/btrfs/ioctl.c
60381@@ -3975,9 +3975,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60382 for (i = 0; i < num_types; i++) {
60383 struct btrfs_space_info *tmp;
60384
60385+ /* Don't copy in more than we allocated */
60386 if (!slot_count)
60387 break;
60388
60389+ slot_count--;
60390+
60391 info = NULL;
60392 rcu_read_lock();
60393 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
60394@@ -3999,10 +4002,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
60395 memcpy(dest, &space, sizeof(space));
60396 dest++;
60397 space_args.total_spaces++;
60398- slot_count--;
60399 }
60400- if (!slot_count)
60401- break;
60402 }
60403 up_read(&info->groups_sem);
60404 }
60405diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
60406index c4124de..d7613eb6 100644
60407--- a/fs/btrfs/super.c
60408+++ b/fs/btrfs/super.c
60409@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
60410 function, line, errstr);
60411 return;
60412 }
60413- ACCESS_ONCE(trans->transaction->aborted) = errno;
60414+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
60415 /* Wake up anybody who may be waiting on this transaction */
60416 wake_up(&root->fs_info->transaction_wait);
60417 wake_up(&root->fs_info->transaction_blocked_wait);
60418diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
60419index 12e5355..cdf30c6 100644
60420--- a/fs/btrfs/sysfs.c
60421+++ b/fs/btrfs/sysfs.c
60422@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
60423 for (set = 0; set < FEAT_MAX; set++) {
60424 int i;
60425 struct attribute *attrs[2];
60426- struct attribute_group agroup = {
60427+ attribute_group_no_const agroup = {
60428 .name = "features",
60429 .attrs = attrs,
60430 };
60431diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
60432index e2e798a..f454c18 100644
60433--- a/fs/btrfs/tree-log.h
60434+++ b/fs/btrfs/tree-log.h
60435@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
60436 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
60437 struct btrfs_trans_handle *trans)
60438 {
60439- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
60440+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
60441 }
60442
60443 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
60444diff --git a/fs/buffer.c b/fs/buffer.c
60445index 3588a80..3d038a9 100644
60446--- a/fs/buffer.c
60447+++ b/fs/buffer.c
60448@@ -2318,6 +2318,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
60449 err = 0;
60450
60451 balance_dirty_pages_ratelimited(mapping);
60452+
60453+ if (unlikely(fatal_signal_pending(current))) {
60454+ err = -EINTR;
60455+ goto out;
60456+ }
60457 }
60458
60459 /* page covers the boundary, find the boundary offset */
60460@@ -3424,7 +3429,7 @@ void __init buffer_init(void)
60461 bh_cachep = kmem_cache_create("buffer_head",
60462 sizeof(struct buffer_head), 0,
60463 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
60464- SLAB_MEM_SPREAD),
60465+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
60466 NULL);
60467
60468 /*
60469diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
60470index fbb08e9..0fda764 100644
60471--- a/fs/cachefiles/bind.c
60472+++ b/fs/cachefiles/bind.c
60473@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
60474 args);
60475
60476 /* start by checking things over */
60477- ASSERT(cache->fstop_percent >= 0 &&
60478- cache->fstop_percent < cache->fcull_percent &&
60479+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
60480 cache->fcull_percent < cache->frun_percent &&
60481 cache->frun_percent < 100);
60482
60483- ASSERT(cache->bstop_percent >= 0 &&
60484- cache->bstop_percent < cache->bcull_percent &&
60485+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
60486 cache->bcull_percent < cache->brun_percent &&
60487 cache->brun_percent < 100);
60488
60489diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
60490index ce1b115..4a6852c 100644
60491--- a/fs/cachefiles/daemon.c
60492+++ b/fs/cachefiles/daemon.c
60493@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
60494 if (n > buflen)
60495 return -EMSGSIZE;
60496
60497- if (copy_to_user(_buffer, buffer, n) != 0)
60498+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
60499 return -EFAULT;
60500
60501 return n;
60502@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
60503 if (test_bit(CACHEFILES_DEAD, &cache->flags))
60504 return -EIO;
60505
60506- if (datalen < 0 || datalen > PAGE_SIZE - 1)
60507+ if (datalen > PAGE_SIZE - 1)
60508 return -EOPNOTSUPP;
60509
60510 /* drag the command string into the kernel so we can parse it */
60511@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
60512 if (args[0] != '%' || args[1] != '\0')
60513 return -EINVAL;
60514
60515- if (fstop < 0 || fstop >= cache->fcull_percent)
60516+ if (fstop >= cache->fcull_percent)
60517 return cachefiles_daemon_range_error(cache, args);
60518
60519 cache->fstop_percent = fstop;
60520@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
60521 if (args[0] != '%' || args[1] != '\0')
60522 return -EINVAL;
60523
60524- if (bstop < 0 || bstop >= cache->bcull_percent)
60525+ if (bstop >= cache->bcull_percent)
60526 return cachefiles_daemon_range_error(cache, args);
60527
60528 cache->bstop_percent = bstop;
60529diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
60530index 8c52472..c4e3a69 100644
60531--- a/fs/cachefiles/internal.h
60532+++ b/fs/cachefiles/internal.h
60533@@ -66,7 +66,7 @@ struct cachefiles_cache {
60534 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
60535 struct rb_root active_nodes; /* active nodes (can't be culled) */
60536 rwlock_t active_lock; /* lock for active_nodes */
60537- atomic_t gravecounter; /* graveyard uniquifier */
60538+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
60539 unsigned frun_percent; /* when to stop culling (% files) */
60540 unsigned fcull_percent; /* when to start culling (% files) */
60541 unsigned fstop_percent; /* when to stop allocating (% files) */
60542@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
60543 * proc.c
60544 */
60545 #ifdef CONFIG_CACHEFILES_HISTOGRAM
60546-extern atomic_t cachefiles_lookup_histogram[HZ];
60547-extern atomic_t cachefiles_mkdir_histogram[HZ];
60548-extern atomic_t cachefiles_create_histogram[HZ];
60549+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60550+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60551+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
60552
60553 extern int __init cachefiles_proc_init(void);
60554 extern void cachefiles_proc_cleanup(void);
60555 static inline
60556-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
60557+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
60558 {
60559 unsigned long jif = jiffies - start_jif;
60560 if (jif >= HZ)
60561 jif = HZ - 1;
60562- atomic_inc(&histogram[jif]);
60563+ atomic_inc_unchecked(&histogram[jif]);
60564 }
60565
60566 #else
60567diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
60568index dad7d95..07475af 100644
60569--- a/fs/cachefiles/namei.c
60570+++ b/fs/cachefiles/namei.c
60571@@ -312,7 +312,7 @@ try_again:
60572 /* first step is to make up a grave dentry in the graveyard */
60573 sprintf(nbuffer, "%08x%08x",
60574 (uint32_t) get_seconds(),
60575- (uint32_t) atomic_inc_return(&cache->gravecounter));
60576+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
60577
60578 /* do the multiway lock magic */
60579 trap = lock_rename(cache->graveyard, dir);
60580diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
60581index eccd339..4c1d995 100644
60582--- a/fs/cachefiles/proc.c
60583+++ b/fs/cachefiles/proc.c
60584@@ -14,9 +14,9 @@
60585 #include <linux/seq_file.h>
60586 #include "internal.h"
60587
60588-atomic_t cachefiles_lookup_histogram[HZ];
60589-atomic_t cachefiles_mkdir_histogram[HZ];
60590-atomic_t cachefiles_create_histogram[HZ];
60591+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
60592+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
60593+atomic_unchecked_t cachefiles_create_histogram[HZ];
60594
60595 /*
60596 * display the latency histogram
60597@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
60598 return 0;
60599 default:
60600 index = (unsigned long) v - 3;
60601- x = atomic_read(&cachefiles_lookup_histogram[index]);
60602- y = atomic_read(&cachefiles_mkdir_histogram[index]);
60603- z = atomic_read(&cachefiles_create_histogram[index]);
60604+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
60605+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
60606+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
60607 if (x == 0 && y == 0 && z == 0)
60608 return 0;
60609
60610diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
60611index 25e745b..220e604 100644
60612--- a/fs/cachefiles/rdwr.c
60613+++ b/fs/cachefiles/rdwr.c
60614@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
60615 old_fs = get_fs();
60616 set_fs(KERNEL_DS);
60617 ret = file->f_op->write(
60618- file, (const void __user *) data, len, &pos);
60619+ file, (const void __force_user *) data, len, &pos);
60620 set_fs(old_fs);
60621 kunmap(page);
60622 file_end_write(file);
60623diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
60624index c29d6ae..719b9bb 100644
60625--- a/fs/ceph/dir.c
60626+++ b/fs/ceph/dir.c
60627@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
60628 struct dentry *dentry, *last;
60629 struct ceph_dentry_info *di;
60630 int err = 0;
60631+ char d_name[DNAME_INLINE_LEN];
60632+ const unsigned char *name;
60633
60634 /* claim ref on last dentry we returned */
60635 last = fi->dentry;
60636@@ -192,7 +194,12 @@ more:
60637
60638 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
60639 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
60640- if (!dir_emit(ctx, dentry->d_name.name,
60641+ name = dentry->d_name.name;
60642+ if (name == dentry->d_iname) {
60643+ memcpy(d_name, name, dentry->d_name.len);
60644+ name = d_name;
60645+ }
60646+ if (!dir_emit(ctx, name,
60647 dentry->d_name.len,
60648 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
60649 dentry->d_inode->i_mode >> 12)) {
60650@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
60651 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
60652 struct ceph_mds_client *mdsc = fsc->mdsc;
60653 unsigned frag = fpos_frag(ctx->pos);
60654- int off = fpos_off(ctx->pos);
60655+ unsigned int off = fpos_off(ctx->pos);
60656 int err;
60657 u32 ftype;
60658 struct ceph_mds_reply_info_parsed *rinfo;
60659diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
60660index a822a6e..4644256 100644
60661--- a/fs/ceph/ioctl.c
60662+++ b/fs/ceph/ioctl.c
60663@@ -41,7 +41,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
60664 /* validate striping parameters */
60665 if ((l->object_size & ~PAGE_MASK) ||
60666 (l->stripe_unit & ~PAGE_MASK) ||
60667- (l->stripe_unit != 0 &&
60668+ ((unsigned)l->stripe_unit != 0 &&
60669 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
60670 return -EINVAL;
60671
60672diff --git a/fs/ceph/super.c b/fs/ceph/super.c
60673index f6e1237..796ffd1 100644
60674--- a/fs/ceph/super.c
60675+++ b/fs/ceph/super.c
60676@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
60677 /*
60678 * construct our own bdi so we can control readahead, etc.
60679 */
60680-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
60681+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
60682
60683 static int ceph_register_bdi(struct super_block *sb,
60684 struct ceph_fs_client *fsc)
60685@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
60686 default_backing_dev_info.ra_pages;
60687
60688 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
60689- atomic_long_inc_return(&bdi_seq));
60690+ atomic_long_inc_return_unchecked(&bdi_seq));
60691 if (!err)
60692 sb->s_bdi = &fsc->backing_dev_info;
60693 return err;
60694diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
60695index 44ec726..bcb06a3 100644
60696--- a/fs/cifs/cifs_debug.c
60697+++ b/fs/cifs/cifs_debug.c
60698@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60699
60700 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
60701 #ifdef CONFIG_CIFS_STATS2
60702- atomic_set(&totBufAllocCount, 0);
60703- atomic_set(&totSmBufAllocCount, 0);
60704+ atomic_set_unchecked(&totBufAllocCount, 0);
60705+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60706 #endif /* CONFIG_CIFS_STATS2 */
60707 spin_lock(&cifs_tcp_ses_lock);
60708 list_for_each(tmp1, &cifs_tcp_ses_list) {
60709@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
60710 tcon = list_entry(tmp3,
60711 struct cifs_tcon,
60712 tcon_list);
60713- atomic_set(&tcon->num_smbs_sent, 0);
60714+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
60715 if (server->ops->clear_stats)
60716 server->ops->clear_stats(tcon);
60717 }
60718@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60719 smBufAllocCount.counter, cifs_min_small);
60720 #ifdef CONFIG_CIFS_STATS2
60721 seq_printf(m, "Total Large %d Small %d Allocations\n",
60722- atomic_read(&totBufAllocCount),
60723- atomic_read(&totSmBufAllocCount));
60724+ atomic_read_unchecked(&totBufAllocCount),
60725+ atomic_read_unchecked(&totSmBufAllocCount));
60726 #endif /* CONFIG_CIFS_STATS2 */
60727
60728 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
60729@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
60730 if (tcon->need_reconnect)
60731 seq_puts(m, "\tDISCONNECTED ");
60732 seq_printf(m, "\nSMBs: %d",
60733- atomic_read(&tcon->num_smbs_sent));
60734+ atomic_read_unchecked(&tcon->num_smbs_sent));
60735 if (server->ops->print_stats)
60736 server->ops->print_stats(m, tcon);
60737 }
60738diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
60739index 889b984..fcb8431 100644
60740--- a/fs/cifs/cifsfs.c
60741+++ b/fs/cifs/cifsfs.c
60742@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
60743 */
60744 cifs_req_cachep = kmem_cache_create("cifs_request",
60745 CIFSMaxBufSize + max_hdr_size, 0,
60746- SLAB_HWCACHE_ALIGN, NULL);
60747+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60748 if (cifs_req_cachep == NULL)
60749 return -ENOMEM;
60750
60751@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60752 efficient to alloc 1 per page off the slab compared to 17K (5page)
60753 alloc of large cifs buffers even when page debugging is on */
60754 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60755- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60756+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60757 NULL);
60758 if (cifs_sm_req_cachep == NULL) {
60759 mempool_destroy(cifs_req_poolp);
60760@@ -1204,8 +1204,8 @@ init_cifs(void)
60761 atomic_set(&bufAllocCount, 0);
60762 atomic_set(&smBufAllocCount, 0);
60763 #ifdef CONFIG_CIFS_STATS2
60764- atomic_set(&totBufAllocCount, 0);
60765- atomic_set(&totSmBufAllocCount, 0);
60766+ atomic_set_unchecked(&totBufAllocCount, 0);
60767+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60768 #endif /* CONFIG_CIFS_STATS2 */
60769
60770 atomic_set(&midCount, 0);
60771diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60772index 25b8392..01e46dc 100644
60773--- a/fs/cifs/cifsglob.h
60774+++ b/fs/cifs/cifsglob.h
60775@@ -821,35 +821,35 @@ struct cifs_tcon {
60776 __u16 Flags; /* optional support bits */
60777 enum statusEnum tidStatus;
60778 #ifdef CONFIG_CIFS_STATS
60779- atomic_t num_smbs_sent;
60780+ atomic_unchecked_t num_smbs_sent;
60781 union {
60782 struct {
60783- atomic_t num_writes;
60784- atomic_t num_reads;
60785- atomic_t num_flushes;
60786- atomic_t num_oplock_brks;
60787- atomic_t num_opens;
60788- atomic_t num_closes;
60789- atomic_t num_deletes;
60790- atomic_t num_mkdirs;
60791- atomic_t num_posixopens;
60792- atomic_t num_posixmkdirs;
60793- atomic_t num_rmdirs;
60794- atomic_t num_renames;
60795- atomic_t num_t2renames;
60796- atomic_t num_ffirst;
60797- atomic_t num_fnext;
60798- atomic_t num_fclose;
60799- atomic_t num_hardlinks;
60800- atomic_t num_symlinks;
60801- atomic_t num_locks;
60802- atomic_t num_acl_get;
60803- atomic_t num_acl_set;
60804+ atomic_unchecked_t num_writes;
60805+ atomic_unchecked_t num_reads;
60806+ atomic_unchecked_t num_flushes;
60807+ atomic_unchecked_t num_oplock_brks;
60808+ atomic_unchecked_t num_opens;
60809+ atomic_unchecked_t num_closes;
60810+ atomic_unchecked_t num_deletes;
60811+ atomic_unchecked_t num_mkdirs;
60812+ atomic_unchecked_t num_posixopens;
60813+ atomic_unchecked_t num_posixmkdirs;
60814+ atomic_unchecked_t num_rmdirs;
60815+ atomic_unchecked_t num_renames;
60816+ atomic_unchecked_t num_t2renames;
60817+ atomic_unchecked_t num_ffirst;
60818+ atomic_unchecked_t num_fnext;
60819+ atomic_unchecked_t num_fclose;
60820+ atomic_unchecked_t num_hardlinks;
60821+ atomic_unchecked_t num_symlinks;
60822+ atomic_unchecked_t num_locks;
60823+ atomic_unchecked_t num_acl_get;
60824+ atomic_unchecked_t num_acl_set;
60825 } cifs_stats;
60826 #ifdef CONFIG_CIFS_SMB2
60827 struct {
60828- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60829- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60830+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60831+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60832 } smb2_stats;
60833 #endif /* CONFIG_CIFS_SMB2 */
60834 } stats;
60835@@ -1190,7 +1190,7 @@ convert_delimiter(char *path, char delim)
60836 }
60837
60838 #ifdef CONFIG_CIFS_STATS
60839-#define cifs_stats_inc atomic_inc
60840+#define cifs_stats_inc atomic_inc_unchecked
60841
60842 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60843 unsigned int bytes)
60844@@ -1557,8 +1557,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60845 /* Various Debug counters */
60846 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60847 #ifdef CONFIG_CIFS_STATS2
60848-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60849-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60850+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60851+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60852 #endif
60853 GLOBAL_EXTERN atomic_t smBufAllocCount;
60854 GLOBAL_EXTERN atomic_t midCount;
60855diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60856index 5f29354..359bc0d 100644
60857--- a/fs/cifs/file.c
60858+++ b/fs/cifs/file.c
60859@@ -2056,10 +2056,14 @@ static int cifs_writepages(struct address_space *mapping,
60860 index = mapping->writeback_index; /* Start from prev offset */
60861 end = -1;
60862 } else {
60863- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60864- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60865- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60866+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60867 range_whole = true;
60868+ index = 0;
60869+ end = ULONG_MAX;
60870+ } else {
60871+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60872+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60873+ }
60874 scanned = true;
60875 }
60876 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60877diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60878index b7415d5..3984ec0 100644
60879--- a/fs/cifs/misc.c
60880+++ b/fs/cifs/misc.c
60881@@ -170,7 +170,7 @@ cifs_buf_get(void)
60882 memset(ret_buf, 0, buf_size + 3);
60883 atomic_inc(&bufAllocCount);
60884 #ifdef CONFIG_CIFS_STATS2
60885- atomic_inc(&totBufAllocCount);
60886+ atomic_inc_unchecked(&totBufAllocCount);
60887 #endif /* CONFIG_CIFS_STATS2 */
60888 }
60889
60890@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60891 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60892 atomic_inc(&smBufAllocCount);
60893 #ifdef CONFIG_CIFS_STATS2
60894- atomic_inc(&totSmBufAllocCount);
60895+ atomic_inc_unchecked(&totSmBufAllocCount);
60896 #endif /* CONFIG_CIFS_STATS2 */
60897
60898 }
60899diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60900index 52131d8..fd79e97 100644
60901--- a/fs/cifs/smb1ops.c
60902+++ b/fs/cifs/smb1ops.c
60903@@ -626,27 +626,27 @@ static void
60904 cifs_clear_stats(struct cifs_tcon *tcon)
60905 {
60906 #ifdef CONFIG_CIFS_STATS
60907- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60908- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60909- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60910- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60911- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60912- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60913- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60914- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60915- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60916- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60917- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60918- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60919- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60920- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60921- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60922- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60923- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60924- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60925- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60926- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60927- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60928+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60929+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60930+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60931+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60932+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60933+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60934+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60935+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60936+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60937+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60938+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60939+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60940+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60941+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60942+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60943+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60944+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60945+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60946+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60947+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60948+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60949 #endif
60950 }
60951
60952@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60953 {
60954 #ifdef CONFIG_CIFS_STATS
60955 seq_printf(m, " Oplocks breaks: %d",
60956- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60957+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60958 seq_printf(m, "\nReads: %d Bytes: %llu",
60959- atomic_read(&tcon->stats.cifs_stats.num_reads),
60960+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60961 (long long)(tcon->bytes_read));
60962 seq_printf(m, "\nWrites: %d Bytes: %llu",
60963- atomic_read(&tcon->stats.cifs_stats.num_writes),
60964+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60965 (long long)(tcon->bytes_written));
60966 seq_printf(m, "\nFlushes: %d",
60967- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60968+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60969 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60970- atomic_read(&tcon->stats.cifs_stats.num_locks),
60971- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60972- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60973+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60974+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60975+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60976 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60977- atomic_read(&tcon->stats.cifs_stats.num_opens),
60978- atomic_read(&tcon->stats.cifs_stats.num_closes),
60979- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60980+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60981+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60982+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60983 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60984- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60985- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60986+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60987+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60988 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60989- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60990- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60991+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60992+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60993 seq_printf(m, "\nRenames: %d T2 Renames %d",
60994- atomic_read(&tcon->stats.cifs_stats.num_renames),
60995- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60996+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60997+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60998 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60999- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
61000- atomic_read(&tcon->stats.cifs_stats.num_fnext),
61001- atomic_read(&tcon->stats.cifs_stats.num_fclose));
61002+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
61003+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
61004+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
61005 #endif
61006 }
61007
61008diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
61009index f522193..586121b 100644
61010--- a/fs/cifs/smb2ops.c
61011+++ b/fs/cifs/smb2ops.c
61012@@ -414,8 +414,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
61013 #ifdef CONFIG_CIFS_STATS
61014 int i;
61015 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
61016- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61017- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61018+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
61019+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
61020 }
61021 #endif
61022 }
61023@@ -455,65 +455,65 @@ static void
61024 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
61025 {
61026 #ifdef CONFIG_CIFS_STATS
61027- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61028- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61029+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
61030+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
61031 seq_printf(m, "\nNegotiates: %d sent %d failed",
61032- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
61033- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
61034+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
61035+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
61036 seq_printf(m, "\nSessionSetups: %d sent %d failed",
61037- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
61038- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
61039+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
61040+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
61041 seq_printf(m, "\nLogoffs: %d sent %d failed",
61042- atomic_read(&sent[SMB2_LOGOFF_HE]),
61043- atomic_read(&failed[SMB2_LOGOFF_HE]));
61044+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
61045+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
61046 seq_printf(m, "\nTreeConnects: %d sent %d failed",
61047- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
61048- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
61049+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
61050+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
61051 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
61052- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
61053- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
61054+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
61055+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
61056 seq_printf(m, "\nCreates: %d sent %d failed",
61057- atomic_read(&sent[SMB2_CREATE_HE]),
61058- atomic_read(&failed[SMB2_CREATE_HE]));
61059+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
61060+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
61061 seq_printf(m, "\nCloses: %d sent %d failed",
61062- atomic_read(&sent[SMB2_CLOSE_HE]),
61063- atomic_read(&failed[SMB2_CLOSE_HE]));
61064+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
61065+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
61066 seq_printf(m, "\nFlushes: %d sent %d failed",
61067- atomic_read(&sent[SMB2_FLUSH_HE]),
61068- atomic_read(&failed[SMB2_FLUSH_HE]));
61069+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
61070+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
61071 seq_printf(m, "\nReads: %d sent %d failed",
61072- atomic_read(&sent[SMB2_READ_HE]),
61073- atomic_read(&failed[SMB2_READ_HE]));
61074+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
61075+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
61076 seq_printf(m, "\nWrites: %d sent %d failed",
61077- atomic_read(&sent[SMB2_WRITE_HE]),
61078- atomic_read(&failed[SMB2_WRITE_HE]));
61079+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
61080+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
61081 seq_printf(m, "\nLocks: %d sent %d failed",
61082- atomic_read(&sent[SMB2_LOCK_HE]),
61083- atomic_read(&failed[SMB2_LOCK_HE]));
61084+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
61085+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
61086 seq_printf(m, "\nIOCTLs: %d sent %d failed",
61087- atomic_read(&sent[SMB2_IOCTL_HE]),
61088- atomic_read(&failed[SMB2_IOCTL_HE]));
61089+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
61090+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
61091 seq_printf(m, "\nCancels: %d sent %d failed",
61092- atomic_read(&sent[SMB2_CANCEL_HE]),
61093- atomic_read(&failed[SMB2_CANCEL_HE]));
61094+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
61095+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
61096 seq_printf(m, "\nEchos: %d sent %d failed",
61097- atomic_read(&sent[SMB2_ECHO_HE]),
61098- atomic_read(&failed[SMB2_ECHO_HE]));
61099+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
61100+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
61101 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
61102- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
61103- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
61104+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
61105+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
61106 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
61107- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
61108- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
61109+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
61110+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
61111 seq_printf(m, "\nQueryInfos: %d sent %d failed",
61112- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
61113- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
61114+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
61115+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
61116 seq_printf(m, "\nSetInfos: %d sent %d failed",
61117- atomic_read(&sent[SMB2_SET_INFO_HE]),
61118- atomic_read(&failed[SMB2_SET_INFO_HE]));
61119+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
61120+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
61121 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
61122- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
61123- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
61124+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
61125+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
61126 #endif
61127 }
61128
61129diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
61130index 74b3a66..0c709f3 100644
61131--- a/fs/cifs/smb2pdu.c
61132+++ b/fs/cifs/smb2pdu.c
61133@@ -2143,8 +2143,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
61134 default:
61135 cifs_dbg(VFS, "info level %u isn't supported\n",
61136 srch_inf->info_level);
61137- rc = -EINVAL;
61138- goto qdir_exit;
61139+ return -EINVAL;
61140 }
61141
61142 req->FileIndex = cpu_to_le32(index);
61143diff --git a/fs/coda/cache.c b/fs/coda/cache.c
61144index 278f8fd..e69c52d 100644
61145--- a/fs/coda/cache.c
61146+++ b/fs/coda/cache.c
61147@@ -24,7 +24,7 @@
61148 #include "coda_linux.h"
61149 #include "coda_cache.h"
61150
61151-static atomic_t permission_epoch = ATOMIC_INIT(0);
61152+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
61153
61154 /* replace or extend an acl cache hit */
61155 void coda_cache_enter(struct inode *inode, int mask)
61156@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
61157 struct coda_inode_info *cii = ITOC(inode);
61158
61159 spin_lock(&cii->c_lock);
61160- cii->c_cached_epoch = atomic_read(&permission_epoch);
61161+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
61162 if (!uid_eq(cii->c_uid, current_fsuid())) {
61163 cii->c_uid = current_fsuid();
61164 cii->c_cached_perm = mask;
61165@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
61166 {
61167 struct coda_inode_info *cii = ITOC(inode);
61168 spin_lock(&cii->c_lock);
61169- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
61170+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
61171 spin_unlock(&cii->c_lock);
61172 }
61173
61174 /* remove all acl caches */
61175 void coda_cache_clear_all(struct super_block *sb)
61176 {
61177- atomic_inc(&permission_epoch);
61178+ atomic_inc_unchecked(&permission_epoch);
61179 }
61180
61181
61182@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
61183 spin_lock(&cii->c_lock);
61184 hit = (mask & cii->c_cached_perm) == mask &&
61185 uid_eq(cii->c_uid, current_fsuid()) &&
61186- cii->c_cached_epoch == atomic_read(&permission_epoch);
61187+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
61188 spin_unlock(&cii->c_lock);
61189
61190 return hit;
61191diff --git a/fs/compat.c b/fs/compat.c
61192index 66d3d3c..9c10175 100644
61193--- a/fs/compat.c
61194+++ b/fs/compat.c
61195@@ -54,7 +54,7 @@
61196 #include <asm/ioctls.h>
61197 #include "internal.h"
61198
61199-int compat_log = 1;
61200+int compat_log = 0;
61201
61202 int compat_printk(const char *fmt, ...)
61203 {
61204@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
61205
61206 set_fs(KERNEL_DS);
61207 /* The __user pointer cast is valid because of the set_fs() */
61208- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
61209+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
61210 set_fs(oldfs);
61211 /* truncating is ok because it's a user address */
61212 if (!ret)
61213@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
61214 goto out;
61215
61216 ret = -EINVAL;
61217- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
61218+ if (nr_segs > UIO_MAXIOV)
61219 goto out;
61220 if (nr_segs > fast_segs) {
61221 ret = -ENOMEM;
61222@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
61223 struct compat_readdir_callback {
61224 struct dir_context ctx;
61225 struct compat_old_linux_dirent __user *dirent;
61226+ struct file * file;
61227 int result;
61228 };
61229
61230@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
61231 buf->result = -EOVERFLOW;
61232 return -EOVERFLOW;
61233 }
61234+
61235+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61236+ return 0;
61237+
61238 buf->result++;
61239 dirent = buf->dirent;
61240 if (!access_ok(VERIFY_WRITE, dirent,
61241@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61242 if (!f.file)
61243 return -EBADF;
61244
61245+ buf.file = f.file;
61246 error = iterate_dir(f.file, &buf.ctx);
61247 if (buf.result)
61248 error = buf.result;
61249@@ -917,6 +923,7 @@ struct compat_getdents_callback {
61250 struct dir_context ctx;
61251 struct compat_linux_dirent __user *current_dir;
61252 struct compat_linux_dirent __user *previous;
61253+ struct file * file;
61254 int count;
61255 int error;
61256 };
61257@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
61258 buf->error = -EOVERFLOW;
61259 return -EOVERFLOW;
61260 }
61261+
61262+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61263+ return 0;
61264+
61265 dirent = buf->previous;
61266 if (dirent) {
61267 if (__put_user(offset, &dirent->d_off))
61268@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
61269 if (!f.file)
61270 return -EBADF;
61271
61272+ buf.file = f.file;
61273 error = iterate_dir(f.file, &buf.ctx);
61274 if (error >= 0)
61275 error = buf.error;
61276@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
61277 struct dir_context ctx;
61278 struct linux_dirent64 __user *current_dir;
61279 struct linux_dirent64 __user *previous;
61280+ struct file * file;
61281 int count;
61282 int error;
61283 };
61284@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
61285 buf->error = -EINVAL; /* only used if we fail.. */
61286 if (reclen > buf->count)
61287 return -EINVAL;
61288+
61289+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61290+ return 0;
61291+
61292 dirent = buf->previous;
61293
61294 if (dirent) {
61295@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61296 if (!f.file)
61297 return -EBADF;
61298
61299+ buf.file = f.file;
61300 error = iterate_dir(f.file, &buf.ctx);
61301 if (error >= 0)
61302 error = buf.error;
61303diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
61304index 4d24d17..4f8c09e 100644
61305--- a/fs/compat_binfmt_elf.c
61306+++ b/fs/compat_binfmt_elf.c
61307@@ -30,11 +30,13 @@
61308 #undef elf_phdr
61309 #undef elf_shdr
61310 #undef elf_note
61311+#undef elf_dyn
61312 #undef elf_addr_t
61313 #define elfhdr elf32_hdr
61314 #define elf_phdr elf32_phdr
61315 #define elf_shdr elf32_shdr
61316 #define elf_note elf32_note
61317+#define elf_dyn Elf32_Dyn
61318 #define elf_addr_t Elf32_Addr
61319
61320 /*
61321diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
61322index afec645..9c65620 100644
61323--- a/fs/compat_ioctl.c
61324+++ b/fs/compat_ioctl.c
61325@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
61326 return -EFAULT;
61327 if (__get_user(udata, &ss32->iomem_base))
61328 return -EFAULT;
61329- ss.iomem_base = compat_ptr(udata);
61330+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
61331 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
61332 __get_user(ss.port_high, &ss32->port_high))
61333 return -EFAULT;
61334@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
61335 for (i = 0; i < nmsgs; i++) {
61336 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
61337 return -EFAULT;
61338- if (get_user(datap, &umsgs[i].buf) ||
61339- put_user(compat_ptr(datap), &tmsgs[i].buf))
61340+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
61341+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
61342 return -EFAULT;
61343 }
61344 return sys_ioctl(fd, cmd, (unsigned long)tdata);
61345@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
61346 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
61347 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
61348 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
61349- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
61350+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
61351 return -EFAULT;
61352
61353 return ioctl_preallocate(file, p);
61354@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
61355 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
61356 {
61357 unsigned int a, b;
61358- a = *(unsigned int *)p;
61359- b = *(unsigned int *)q;
61360+ a = *(const unsigned int *)p;
61361+ b = *(const unsigned int *)q;
61362 if (a > b)
61363 return 1;
61364 if (a < b)
61365diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
61366index 668dcab..daebcd6 100644
61367--- a/fs/configfs/dir.c
61368+++ b/fs/configfs/dir.c
61369@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61370 }
61371 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
61372 struct configfs_dirent *next;
61373- const char *name;
61374+ const unsigned char * name;
61375+ char d_name[sizeof(next->s_dentry->d_iname)];
61376 int len;
61377 struct inode *inode = NULL;
61378
61379@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
61380 continue;
61381
61382 name = configfs_get_name(next);
61383- len = strlen(name);
61384+ if (next->s_dentry && name == next->s_dentry->d_iname) {
61385+ len = next->s_dentry->d_name.len;
61386+ memcpy(d_name, name, len);
61387+ name = d_name;
61388+ } else
61389+ len = strlen(name);
61390
61391 /*
61392 * We'll have a dentry and an inode for
61393diff --git a/fs/coredump.c b/fs/coredump.c
61394index a93f7e6..d58bcbe 100644
61395--- a/fs/coredump.c
61396+++ b/fs/coredump.c
61397@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
61398 struct pipe_inode_info *pipe = file->private_data;
61399
61400 pipe_lock(pipe);
61401- pipe->readers++;
61402- pipe->writers--;
61403+ atomic_inc(&pipe->readers);
61404+ atomic_dec(&pipe->writers);
61405 wake_up_interruptible_sync(&pipe->wait);
61406 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61407 pipe_unlock(pipe);
61408@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
61409 * We actually want wait_event_freezable() but then we need
61410 * to clear TIF_SIGPENDING and improve dump_interrupted().
61411 */
61412- wait_event_interruptible(pipe->wait, pipe->readers == 1);
61413+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
61414
61415 pipe_lock(pipe);
61416- pipe->readers--;
61417- pipe->writers++;
61418+ atomic_dec(&pipe->readers);
61419+ atomic_inc(&pipe->writers);
61420 pipe_unlock(pipe);
61421 }
61422
61423@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
61424 struct files_struct *displaced;
61425 bool need_nonrelative = false;
61426 bool core_dumped = false;
61427- static atomic_t core_dump_count = ATOMIC_INIT(0);
61428+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
61429+ long signr = siginfo->si_signo;
61430+ int dumpable;
61431 struct coredump_params cprm = {
61432 .siginfo = siginfo,
61433 .regs = signal_pt_regs(),
61434@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
61435 .mm_flags = mm->flags,
61436 };
61437
61438- audit_core_dumps(siginfo->si_signo);
61439+ audit_core_dumps(signr);
61440+
61441+ dumpable = __get_dumpable(cprm.mm_flags);
61442+
61443+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
61444+ gr_handle_brute_attach(dumpable);
61445
61446 binfmt = mm->binfmt;
61447 if (!binfmt || !binfmt->core_dump)
61448 goto fail;
61449- if (!__get_dumpable(cprm.mm_flags))
61450+ if (!dumpable)
61451 goto fail;
61452
61453 cred = prepare_creds();
61454@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
61455 need_nonrelative = true;
61456 }
61457
61458- retval = coredump_wait(siginfo->si_signo, &core_state);
61459+ retval = coredump_wait(signr, &core_state);
61460 if (retval < 0)
61461 goto fail_creds;
61462
61463@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
61464 }
61465 cprm.limit = RLIM_INFINITY;
61466
61467- dump_count = atomic_inc_return(&core_dump_count);
61468+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
61469 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
61470 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
61471 task_tgid_vnr(current), current->comm);
61472@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
61473 } else {
61474 struct inode *inode;
61475
61476+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
61477+
61478 if (cprm.limit < binfmt->min_coredump)
61479 goto fail_unlock;
61480
61481@@ -673,7 +682,7 @@ close_fail:
61482 filp_close(cprm.file, NULL);
61483 fail_dropcount:
61484 if (ispipe)
61485- atomic_dec(&core_dump_count);
61486+ atomic_dec_unchecked(&core_dump_count);
61487 fail_unlock:
61488 kfree(cn.corename);
61489 coredump_finish(mm, core_dumped);
61490@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
61491 struct file *file = cprm->file;
61492 loff_t pos = file->f_pos;
61493 ssize_t n;
61494+
61495+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
61496 if (cprm->written + nr > cprm->limit)
61497 return 0;
61498 while (nr) {
61499diff --git a/fs/dcache.c b/fs/dcache.c
61500index cb25a1a..c557cb6 100644
61501--- a/fs/dcache.c
61502+++ b/fs/dcache.c
61503@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
61504 * dentry_iput drops the locks, at which point nobody (except
61505 * transient RCU lookups) can reach this dentry.
61506 */
61507- BUG_ON((int)dentry->d_lockref.count > 0);
61508+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
61509 this_cpu_dec(nr_dentry);
61510 if (dentry->d_op && dentry->d_op->d_release)
61511 dentry->d_op->d_release(dentry);
61512@@ -531,7 +531,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
61513 struct dentry *parent = dentry->d_parent;
61514 if (IS_ROOT(dentry))
61515 return NULL;
61516- if (unlikely((int)dentry->d_lockref.count < 0))
61517+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
61518 return NULL;
61519 if (likely(spin_trylock(&parent->d_lock)))
61520 return parent;
61521@@ -608,7 +608,7 @@ repeat:
61522 dentry->d_flags |= DCACHE_REFERENCED;
61523 dentry_lru_add(dentry);
61524
61525- dentry->d_lockref.count--;
61526+ __lockref_dec(&dentry->d_lockref);
61527 spin_unlock(&dentry->d_lock);
61528 return;
61529
61530@@ -663,7 +663,7 @@ int d_invalidate(struct dentry * dentry)
61531 * We also need to leave mountpoints alone,
61532 * directory or not.
61533 */
61534- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
61535+ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) {
61536 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
61537 spin_unlock(&dentry->d_lock);
61538 return -EBUSY;
61539@@ -679,7 +679,7 @@ EXPORT_SYMBOL(d_invalidate);
61540 /* This must be called with d_lock held */
61541 static inline void __dget_dlock(struct dentry *dentry)
61542 {
61543- dentry->d_lockref.count++;
61544+ __lockref_inc(&dentry->d_lockref);
61545 }
61546
61547 static inline void __dget(struct dentry *dentry)
61548@@ -720,8 +720,8 @@ repeat:
61549 goto repeat;
61550 }
61551 rcu_read_unlock();
61552- BUG_ON(!ret->d_lockref.count);
61553- ret->d_lockref.count++;
61554+ BUG_ON(!__lockref_read(&ret->d_lockref));
61555+ __lockref_inc(&ret->d_lockref);
61556 spin_unlock(&ret->d_lock);
61557 return ret;
61558 }
61559@@ -798,7 +798,7 @@ restart:
61560 spin_lock(&inode->i_lock);
61561 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
61562 spin_lock(&dentry->d_lock);
61563- if (!dentry->d_lockref.count) {
61564+ if (!__lockref_read(&dentry->d_lockref)) {
61565 /*
61566 * inform the fs via d_prune that this dentry
61567 * is about to be unhashed and destroyed.
61568@@ -841,7 +841,7 @@ static void shrink_dentry_list(struct list_head *list)
61569 * We found an inuse dentry which was not removed from
61570 * the LRU because of laziness during lookup. Do not free it.
61571 */
61572- if ((int)dentry->d_lockref.count > 0) {
61573+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
61574 spin_unlock(&dentry->d_lock);
61575 if (parent)
61576 spin_unlock(&parent->d_lock);
61577@@ -879,8 +879,8 @@ static void shrink_dentry_list(struct list_head *list)
61578 dentry = parent;
61579 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
61580 parent = lock_parent(dentry);
61581- if (dentry->d_lockref.count != 1) {
61582- dentry->d_lockref.count--;
61583+ if (__lockref_read(&dentry->d_lockref) != 1) {
61584+ __lockref_inc(&dentry->d_lockref);
61585 spin_unlock(&dentry->d_lock);
61586 if (parent)
61587 spin_unlock(&parent->d_lock);
61588@@ -920,7 +920,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
61589 * counts, just remove them from the LRU. Otherwise give them
61590 * another pass through the LRU.
61591 */
61592- if (dentry->d_lockref.count) {
61593+ if (__lockref_read(&dentry->d_lockref) > 0) {
61594 d_lru_isolate(dentry);
61595 spin_unlock(&dentry->d_lock);
61596 return LRU_REMOVED;
61597@@ -1149,6 +1149,7 @@ out_unlock:
61598 return;
61599
61600 rename_retry:
61601+ done_seqretry(&rename_lock, seq);
61602 if (!retry)
61603 return;
61604 seq = 1;
61605@@ -1255,7 +1256,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
61606 } else {
61607 if (dentry->d_flags & DCACHE_LRU_LIST)
61608 d_lru_del(dentry);
61609- if (!dentry->d_lockref.count) {
61610+ if (!__lockref_read(&dentry->d_lockref)) {
61611 d_shrink_add(dentry, &data->dispose);
61612 data->found++;
61613 }
61614@@ -1303,7 +1304,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61615 return D_WALK_CONTINUE;
61616
61617 /* root with refcount 1 is fine */
61618- if (dentry == _data && dentry->d_lockref.count == 1)
61619+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
61620 return D_WALK_CONTINUE;
61621
61622 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
61623@@ -1312,7 +1313,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
61624 dentry->d_inode ?
61625 dentry->d_inode->i_ino : 0UL,
61626 dentry,
61627- dentry->d_lockref.count,
61628+ __lockref_read(&dentry->d_lockref),
61629 dentry->d_sb->s_type->name,
61630 dentry->d_sb->s_id);
61631 WARN_ON(1);
61632@@ -1438,7 +1439,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61633 */
61634 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
61635 if (name->len > DNAME_INLINE_LEN-1) {
61636- dname = kmalloc(name->len + 1, GFP_KERNEL);
61637+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
61638 if (!dname) {
61639 kmem_cache_free(dentry_cache, dentry);
61640 return NULL;
61641@@ -1456,7 +1457,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
61642 smp_wmb();
61643 dentry->d_name.name = dname;
61644
61645- dentry->d_lockref.count = 1;
61646+ __lockref_set(&dentry->d_lockref, 1);
61647 dentry->d_flags = 0;
61648 spin_lock_init(&dentry->d_lock);
61649 seqcount_init(&dentry->d_seq);
61650@@ -2196,7 +2197,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
61651 goto next;
61652 }
61653
61654- dentry->d_lockref.count++;
61655+ __lockref_inc(&dentry->d_lockref);
61656 found = dentry;
61657 spin_unlock(&dentry->d_lock);
61658 break;
61659@@ -2295,7 +2296,7 @@ again:
61660 spin_lock(&dentry->d_lock);
61661 inode = dentry->d_inode;
61662 isdir = S_ISDIR(inode->i_mode);
61663- if (dentry->d_lockref.count == 1) {
61664+ if (__lockref_read(&dentry->d_lockref) == 1) {
61665 if (!spin_trylock(&inode->i_lock)) {
61666 spin_unlock(&dentry->d_lock);
61667 cpu_relax();
61668@@ -2675,11 +2676,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
61669 if (!IS_ROOT(new)) {
61670 spin_unlock(&inode->i_lock);
61671 dput(new);
61672+ iput(inode);
61673 return ERR_PTR(-EIO);
61674 }
61675 if (d_ancestor(new, dentry)) {
61676 spin_unlock(&inode->i_lock);
61677 dput(new);
61678+ iput(inode);
61679 return ERR_PTR(-EIO);
61680 }
61681 write_seqlock(&rename_lock);
61682@@ -3300,7 +3303,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
61683
61684 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
61685 dentry->d_flags |= DCACHE_GENOCIDE;
61686- dentry->d_lockref.count--;
61687+ __lockref_dec(&dentry->d_lockref);
61688 }
61689 }
61690 return D_WALK_CONTINUE;
61691@@ -3416,7 +3419,8 @@ void __init vfs_caches_init(unsigned long mempages)
61692 mempages -= reserve;
61693
61694 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
61695- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
61696+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
61697+ SLAB_NO_SANITIZE, NULL);
61698
61699 dcache_init();
61700 inode_init();
61701diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
61702index 1e3b99d..6512101 100644
61703--- a/fs/debugfs/inode.c
61704+++ b/fs/debugfs/inode.c
61705@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
61706 */
61707 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
61708 {
61709+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
61710+ return __create_file(name, S_IFDIR | S_IRWXU,
61711+#else
61712 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
61713+#endif
61714 parent, NULL, NULL);
61715 }
61716 EXPORT_SYMBOL_GPL(debugfs_create_dir);
61717diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
61718index 57ee4c5..ecb13b0 100644
61719--- a/fs/ecryptfs/inode.c
61720+++ b/fs/ecryptfs/inode.c
61721@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
61722 old_fs = get_fs();
61723 set_fs(get_ds());
61724 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
61725- (char __user *)lower_buf,
61726+ (char __force_user *)lower_buf,
61727 PATH_MAX);
61728 set_fs(old_fs);
61729 if (rc < 0)
61730diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
61731index e4141f2..d8263e8 100644
61732--- a/fs/ecryptfs/miscdev.c
61733+++ b/fs/ecryptfs/miscdev.c
61734@@ -304,7 +304,7 @@ check_list:
61735 goto out_unlock_msg_ctx;
61736 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
61737 if (msg_ctx->msg) {
61738- if (copy_to_user(&buf[i], packet_length, packet_length_size))
61739+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
61740 goto out_unlock_msg_ctx;
61741 i += packet_length_size;
61742 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
61743diff --git a/fs/exec.c b/fs/exec.c
61744index a2b42a9..1e924b3 100644
61745--- a/fs/exec.c
61746+++ b/fs/exec.c
61747@@ -56,8 +56,20 @@
61748 #include <linux/pipe_fs_i.h>
61749 #include <linux/oom.h>
61750 #include <linux/compat.h>
61751+#include <linux/random.h>
61752+#include <linux/seq_file.h>
61753+#include <linux/coredump.h>
61754+#include <linux/mman.h>
61755+
61756+#ifdef CONFIG_PAX_REFCOUNT
61757+#include <linux/kallsyms.h>
61758+#include <linux/kdebug.h>
61759+#endif
61760+
61761+#include <trace/events/fs.h>
61762
61763 #include <asm/uaccess.h>
61764+#include <asm/sections.h>
61765 #include <asm/mmu_context.h>
61766 #include <asm/tlb.h>
61767
61768@@ -66,19 +78,34 @@
61769
61770 #include <trace/events/sched.h>
61771
61772+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61773+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61774+{
61775+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61776+}
61777+#endif
61778+
61779+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61780+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61781+EXPORT_SYMBOL(pax_set_initial_flags_func);
61782+#endif
61783+
61784 int suid_dumpable = 0;
61785
61786 static LIST_HEAD(formats);
61787 static DEFINE_RWLOCK(binfmt_lock);
61788
61789+extern int gr_process_kernel_exec_ban(void);
61790+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61791+
61792 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61793 {
61794 BUG_ON(!fmt);
61795 if (WARN_ON(!fmt->load_binary))
61796 return;
61797 write_lock(&binfmt_lock);
61798- insert ? list_add(&fmt->lh, &formats) :
61799- list_add_tail(&fmt->lh, &formats);
61800+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61801+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61802 write_unlock(&binfmt_lock);
61803 }
61804
61805@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61806 void unregister_binfmt(struct linux_binfmt * fmt)
61807 {
61808 write_lock(&binfmt_lock);
61809- list_del(&fmt->lh);
61810+ pax_list_del((struct list_head *)&fmt->lh);
61811 write_unlock(&binfmt_lock);
61812 }
61813
61814@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61815 int write)
61816 {
61817 struct page *page;
61818- int ret;
61819
61820-#ifdef CONFIG_STACK_GROWSUP
61821- if (write) {
61822- ret = expand_downwards(bprm->vma, pos);
61823- if (ret < 0)
61824- return NULL;
61825- }
61826-#endif
61827- ret = get_user_pages(current, bprm->mm, pos,
61828- 1, write, 1, &page, NULL);
61829- if (ret <= 0)
61830+ if (0 > expand_downwards(bprm->vma, pos))
61831+ return NULL;
61832+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61833 return NULL;
61834
61835 if (write) {
61836@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61837 if (size <= ARG_MAX)
61838 return page;
61839
61840+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61841+ // only allow 512KB for argv+env on suid/sgid binaries
61842+ // to prevent easy ASLR exhaustion
61843+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61844+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61845+ (size > (512 * 1024))) {
61846+ put_page(page);
61847+ return NULL;
61848+ }
61849+#endif
61850+
61851 /*
61852 * Limit to 1/4-th the stack size for the argv+env strings.
61853 * This ensures that:
61854@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61855 vma->vm_end = STACK_TOP_MAX;
61856 vma->vm_start = vma->vm_end - PAGE_SIZE;
61857 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61858+
61859+#ifdef CONFIG_PAX_SEGMEXEC
61860+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61861+#endif
61862+
61863 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61864 INIT_LIST_HEAD(&vma->anon_vma_chain);
61865
61866@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61867 mm->stack_vm = mm->total_vm = 1;
61868 up_write(&mm->mmap_sem);
61869 bprm->p = vma->vm_end - sizeof(void *);
61870+
61871+#ifdef CONFIG_PAX_RANDUSTACK
61872+ if (randomize_va_space)
61873+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61874+#endif
61875+
61876 return 0;
61877 err:
61878 up_write(&mm->mmap_sem);
61879@@ -395,7 +436,7 @@ struct user_arg_ptr {
61880 } ptr;
61881 };
61882
61883-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61884+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61885 {
61886 const char __user *native;
61887
61888@@ -404,14 +445,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61889 compat_uptr_t compat;
61890
61891 if (get_user(compat, argv.ptr.compat + nr))
61892- return ERR_PTR(-EFAULT);
61893+ return (const char __force_user *)ERR_PTR(-EFAULT);
61894
61895 return compat_ptr(compat);
61896 }
61897 #endif
61898
61899 if (get_user(native, argv.ptr.native + nr))
61900- return ERR_PTR(-EFAULT);
61901+ return (const char __force_user *)ERR_PTR(-EFAULT);
61902
61903 return native;
61904 }
61905@@ -430,7 +471,7 @@ static int count(struct user_arg_ptr argv, int max)
61906 if (!p)
61907 break;
61908
61909- if (IS_ERR(p))
61910+ if (IS_ERR((const char __force_kernel *)p))
61911 return -EFAULT;
61912
61913 if (i >= max)
61914@@ -465,7 +506,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61915
61916 ret = -EFAULT;
61917 str = get_user_arg_ptr(argv, argc);
61918- if (IS_ERR(str))
61919+ if (IS_ERR((const char __force_kernel *)str))
61920 goto out;
61921
61922 len = strnlen_user(str, MAX_ARG_STRLEN);
61923@@ -547,7 +588,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61924 int r;
61925 mm_segment_t oldfs = get_fs();
61926 struct user_arg_ptr argv = {
61927- .ptr.native = (const char __user *const __user *)__argv,
61928+ .ptr.native = (const char __user * const __force_user *)__argv,
61929 };
61930
61931 set_fs(KERNEL_DS);
61932@@ -582,7 +623,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61933 unsigned long new_end = old_end - shift;
61934 struct mmu_gather tlb;
61935
61936- BUG_ON(new_start > new_end);
61937+ if (new_start >= new_end || new_start < mmap_min_addr)
61938+ return -ENOMEM;
61939
61940 /*
61941 * ensure there are no vmas between where we want to go
61942@@ -591,6 +633,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61943 if (vma != find_vma(mm, new_start))
61944 return -EFAULT;
61945
61946+#ifdef CONFIG_PAX_SEGMEXEC
61947+ BUG_ON(pax_find_mirror_vma(vma));
61948+#endif
61949+
61950 /*
61951 * cover the whole range: [new_start, old_end)
61952 */
61953@@ -671,10 +717,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61954 stack_top = arch_align_stack(stack_top);
61955 stack_top = PAGE_ALIGN(stack_top);
61956
61957- if (unlikely(stack_top < mmap_min_addr) ||
61958- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61959- return -ENOMEM;
61960-
61961 stack_shift = vma->vm_end - stack_top;
61962
61963 bprm->p -= stack_shift;
61964@@ -686,8 +728,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61965 bprm->exec -= stack_shift;
61966
61967 down_write(&mm->mmap_sem);
61968+
61969+ /* Move stack pages down in memory. */
61970+ if (stack_shift) {
61971+ ret = shift_arg_pages(vma, stack_shift);
61972+ if (ret)
61973+ goto out_unlock;
61974+ }
61975+
61976 vm_flags = VM_STACK_FLAGS;
61977
61978+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61979+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61980+ vm_flags &= ~VM_EXEC;
61981+
61982+#ifdef CONFIG_PAX_MPROTECT
61983+ if (mm->pax_flags & MF_PAX_MPROTECT)
61984+ vm_flags &= ~VM_MAYEXEC;
61985+#endif
61986+
61987+ }
61988+#endif
61989+
61990 /*
61991 * Adjust stack execute permissions; explicitly enable for
61992 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61993@@ -706,13 +768,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61994 goto out_unlock;
61995 BUG_ON(prev != vma);
61996
61997- /* Move stack pages down in memory. */
61998- if (stack_shift) {
61999- ret = shift_arg_pages(vma, stack_shift);
62000- if (ret)
62001- goto out_unlock;
62002- }
62003-
62004 /* mprotect_fixup is overkill to remove the temporary stack flags */
62005 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
62006
62007@@ -736,6 +791,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
62008 #endif
62009 current->mm->start_stack = bprm->p;
62010 ret = expand_stack(vma, stack_base);
62011+
62012+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
62013+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
62014+ unsigned long size;
62015+ vm_flags_t vm_flags;
62016+
62017+ size = STACK_TOP - vma->vm_end;
62018+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
62019+
62020+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
62021+
62022+#ifdef CONFIG_X86
62023+ if (!ret) {
62024+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
62025+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
62026+ }
62027+#endif
62028+
62029+ }
62030+#endif
62031+
62032 if (ret)
62033 ret = -EFAULT;
62034
62035@@ -771,6 +847,8 @@ static struct file *do_open_exec(struct filename *name)
62036
62037 fsnotify_open(file);
62038
62039+ trace_open_exec(name->name);
62040+
62041 err = deny_write_access(file);
62042 if (err)
62043 goto exit;
62044@@ -800,7 +878,7 @@ int kernel_read(struct file *file, loff_t offset,
62045 old_fs = get_fs();
62046 set_fs(get_ds());
62047 /* The cast to a user pointer is valid due to the set_fs() */
62048- result = vfs_read(file, (void __user *)addr, count, &pos);
62049+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
62050 set_fs(old_fs);
62051 return result;
62052 }
62053@@ -845,6 +923,7 @@ static int exec_mmap(struct mm_struct *mm)
62054 tsk->mm = mm;
62055 tsk->active_mm = mm;
62056 activate_mm(active_mm, mm);
62057+ populate_stack();
62058 tsk->mm->vmacache_seqnum = 0;
62059 vmacache_flush(tsk);
62060 task_unlock(tsk);
62061@@ -1243,7 +1322,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
62062 }
62063 rcu_read_unlock();
62064
62065- if (p->fs->users > n_fs)
62066+ if (atomic_read(&p->fs->users) > n_fs)
62067 bprm->unsafe |= LSM_UNSAFE_SHARE;
62068 else
62069 p->fs->in_exec = 1;
62070@@ -1419,6 +1498,31 @@ static int exec_binprm(struct linux_binprm *bprm)
62071 return ret;
62072 }
62073
62074+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62075+static DEFINE_PER_CPU(u64, exec_counter);
62076+static int __init init_exec_counters(void)
62077+{
62078+ unsigned int cpu;
62079+
62080+ for_each_possible_cpu(cpu) {
62081+ per_cpu(exec_counter, cpu) = (u64)cpu;
62082+ }
62083+
62084+ return 0;
62085+}
62086+early_initcall(init_exec_counters);
62087+static inline void increment_exec_counter(void)
62088+{
62089+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
62090+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
62091+}
62092+#else
62093+static inline void increment_exec_counter(void) {}
62094+#endif
62095+
62096+extern void gr_handle_exec_args(struct linux_binprm *bprm,
62097+ struct user_arg_ptr argv);
62098+
62099 /*
62100 * sys_execve() executes a new program.
62101 */
62102@@ -1426,6 +1530,11 @@ static int do_execve_common(struct filename *filename,
62103 struct user_arg_ptr argv,
62104 struct user_arg_ptr envp)
62105 {
62106+#ifdef CONFIG_GRKERNSEC
62107+ struct file *old_exec_file;
62108+ struct acl_subject_label *old_acl;
62109+ struct rlimit old_rlim[RLIM_NLIMITS];
62110+#endif
62111 struct linux_binprm *bprm;
62112 struct file *file;
62113 struct files_struct *displaced;
62114@@ -1434,6 +1543,8 @@ static int do_execve_common(struct filename *filename,
62115 if (IS_ERR(filename))
62116 return PTR_ERR(filename);
62117
62118+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
62119+
62120 /*
62121 * We move the actual failure in case of RLIMIT_NPROC excess from
62122 * set*uid() to execve() because too many poorly written programs
62123@@ -1471,11 +1582,21 @@ static int do_execve_common(struct filename *filename,
62124 if (IS_ERR(file))
62125 goto out_unmark;
62126
62127+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
62128+ retval = -EPERM;
62129+ goto out_unmark;
62130+ }
62131+
62132 sched_exec();
62133
62134 bprm->file = file;
62135 bprm->filename = bprm->interp = filename->name;
62136
62137+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
62138+ retval = -EACCES;
62139+ goto out_unmark;
62140+ }
62141+
62142 retval = bprm_mm_init(bprm);
62143 if (retval)
62144 goto out_unmark;
62145@@ -1492,24 +1613,70 @@ static int do_execve_common(struct filename *filename,
62146 if (retval < 0)
62147 goto out;
62148
62149+#ifdef CONFIG_GRKERNSEC
62150+ old_acl = current->acl;
62151+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
62152+ old_exec_file = current->exec_file;
62153+ get_file(file);
62154+ current->exec_file = file;
62155+#endif
62156+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62157+ /* limit suid stack to 8MB
62158+ * we saved the old limits above and will restore them if this exec fails
62159+ */
62160+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
62161+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
62162+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
62163+#endif
62164+
62165+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
62166+ retval = -EPERM;
62167+ goto out_fail;
62168+ }
62169+
62170+ if (!gr_tpe_allow(file)) {
62171+ retval = -EACCES;
62172+ goto out_fail;
62173+ }
62174+
62175+ if (gr_check_crash_exec(file)) {
62176+ retval = -EACCES;
62177+ goto out_fail;
62178+ }
62179+
62180+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
62181+ bprm->unsafe);
62182+ if (retval < 0)
62183+ goto out_fail;
62184+
62185 retval = copy_strings_kernel(1, &bprm->filename, bprm);
62186 if (retval < 0)
62187- goto out;
62188+ goto out_fail;
62189
62190 bprm->exec = bprm->p;
62191 retval = copy_strings(bprm->envc, envp, bprm);
62192 if (retval < 0)
62193- goto out;
62194+ goto out_fail;
62195
62196 retval = copy_strings(bprm->argc, argv, bprm);
62197 if (retval < 0)
62198- goto out;
62199+ goto out_fail;
62200+
62201+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
62202+
62203+ gr_handle_exec_args(bprm, argv);
62204
62205 retval = exec_binprm(bprm);
62206 if (retval < 0)
62207- goto out;
62208+ goto out_fail;
62209+#ifdef CONFIG_GRKERNSEC
62210+ if (old_exec_file)
62211+ fput(old_exec_file);
62212+#endif
62213
62214 /* execve succeeded */
62215+
62216+ increment_exec_counter();
62217 current->fs->in_exec = 0;
62218 current->in_execve = 0;
62219 acct_update_integrals(current);
62220@@ -1520,6 +1687,14 @@ static int do_execve_common(struct filename *filename,
62221 put_files_struct(displaced);
62222 return retval;
62223
62224+out_fail:
62225+#ifdef CONFIG_GRKERNSEC
62226+ current->acl = old_acl;
62227+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
62228+ fput(current->exec_file);
62229+ current->exec_file = old_exec_file;
62230+#endif
62231+
62232 out:
62233 if (bprm->mm) {
62234 acct_arg_size(bprm, 0);
62235@@ -1611,3 +1786,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
62236 return compat_do_execve(getname(filename), argv, envp);
62237 }
62238 #endif
62239+
62240+int pax_check_flags(unsigned long *flags)
62241+{
62242+ int retval = 0;
62243+
62244+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
62245+ if (*flags & MF_PAX_SEGMEXEC)
62246+ {
62247+ *flags &= ~MF_PAX_SEGMEXEC;
62248+ retval = -EINVAL;
62249+ }
62250+#endif
62251+
62252+ if ((*flags & MF_PAX_PAGEEXEC)
62253+
62254+#ifdef CONFIG_PAX_PAGEEXEC
62255+ && (*flags & MF_PAX_SEGMEXEC)
62256+#endif
62257+
62258+ )
62259+ {
62260+ *flags &= ~MF_PAX_PAGEEXEC;
62261+ retval = -EINVAL;
62262+ }
62263+
62264+ if ((*flags & MF_PAX_MPROTECT)
62265+
62266+#ifdef CONFIG_PAX_MPROTECT
62267+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62268+#endif
62269+
62270+ )
62271+ {
62272+ *flags &= ~MF_PAX_MPROTECT;
62273+ retval = -EINVAL;
62274+ }
62275+
62276+ if ((*flags & MF_PAX_EMUTRAMP)
62277+
62278+#ifdef CONFIG_PAX_EMUTRAMP
62279+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
62280+#endif
62281+
62282+ )
62283+ {
62284+ *flags &= ~MF_PAX_EMUTRAMP;
62285+ retval = -EINVAL;
62286+ }
62287+
62288+ return retval;
62289+}
62290+
62291+EXPORT_SYMBOL(pax_check_flags);
62292+
62293+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
62294+char *pax_get_path(const struct path *path, char *buf, int buflen)
62295+{
62296+ char *pathname = d_path(path, buf, buflen);
62297+
62298+ if (IS_ERR(pathname))
62299+ goto toolong;
62300+
62301+ pathname = mangle_path(buf, pathname, "\t\n\\");
62302+ if (!pathname)
62303+ goto toolong;
62304+
62305+ *pathname = 0;
62306+ return buf;
62307+
62308+toolong:
62309+ return "<path too long>";
62310+}
62311+EXPORT_SYMBOL(pax_get_path);
62312+
62313+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
62314+{
62315+ struct task_struct *tsk = current;
62316+ struct mm_struct *mm = current->mm;
62317+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
62318+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
62319+ char *path_exec = NULL;
62320+ char *path_fault = NULL;
62321+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
62322+ siginfo_t info = { };
62323+
62324+ if (buffer_exec && buffer_fault) {
62325+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
62326+
62327+ down_read(&mm->mmap_sem);
62328+ vma = mm->mmap;
62329+ while (vma && (!vma_exec || !vma_fault)) {
62330+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
62331+ vma_exec = vma;
62332+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
62333+ vma_fault = vma;
62334+ vma = vma->vm_next;
62335+ }
62336+ if (vma_exec)
62337+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
62338+ if (vma_fault) {
62339+ start = vma_fault->vm_start;
62340+ end = vma_fault->vm_end;
62341+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
62342+ if (vma_fault->vm_file)
62343+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
62344+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
62345+ path_fault = "<heap>";
62346+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
62347+ path_fault = "<stack>";
62348+ else
62349+ path_fault = "<anonymous mapping>";
62350+ }
62351+ up_read(&mm->mmap_sem);
62352+ }
62353+ if (tsk->signal->curr_ip)
62354+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
62355+ else
62356+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
62357+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
62358+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
62359+ free_page((unsigned long)buffer_exec);
62360+ free_page((unsigned long)buffer_fault);
62361+ pax_report_insns(regs, pc, sp);
62362+ info.si_signo = SIGKILL;
62363+ info.si_errno = 0;
62364+ info.si_code = SI_KERNEL;
62365+ info.si_pid = 0;
62366+ info.si_uid = 0;
62367+ do_coredump(&info);
62368+}
62369+#endif
62370+
62371+#ifdef CONFIG_PAX_REFCOUNT
62372+void pax_report_refcount_overflow(struct pt_regs *regs)
62373+{
62374+ if (current->signal->curr_ip)
62375+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
62376+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
62377+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62378+ else
62379+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
62380+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
62381+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
62382+ preempt_disable();
62383+ show_regs(regs);
62384+ preempt_enable();
62385+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
62386+}
62387+#endif
62388+
62389+#ifdef CONFIG_PAX_USERCOPY
62390+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
62391+static noinline int check_stack_object(const void *obj, unsigned long len)
62392+{
62393+ const void * const stack = task_stack_page(current);
62394+ const void * const stackend = stack + THREAD_SIZE;
62395+
62396+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62397+ const void *frame = NULL;
62398+ const void *oldframe;
62399+#endif
62400+
62401+ if (obj + len < obj)
62402+ return -1;
62403+
62404+ if (obj + len <= stack || stackend <= obj)
62405+ return 0;
62406+
62407+ if (obj < stack || stackend < obj + len)
62408+ return -1;
62409+
62410+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
62411+ oldframe = __builtin_frame_address(1);
62412+ if (oldframe)
62413+ frame = __builtin_frame_address(2);
62414+ /*
62415+ low ----------------------------------------------> high
62416+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
62417+ ^----------------^
62418+ allow copies only within here
62419+ */
62420+ while (stack <= frame && frame < stackend) {
62421+ /* if obj + len extends past the last frame, this
62422+ check won't pass and the next frame will be 0,
62423+ causing us to bail out and correctly report
62424+ the copy as invalid
62425+ */
62426+ if (obj + len <= frame)
62427+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
62428+ oldframe = frame;
62429+ frame = *(const void * const *)frame;
62430+ }
62431+ return -1;
62432+#else
62433+ return 1;
62434+#endif
62435+}
62436+
62437+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
62438+{
62439+ if (current->signal->curr_ip)
62440+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62441+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62442+ else
62443+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
62444+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
62445+ dump_stack();
62446+ gr_handle_kernel_exploit();
62447+ do_group_exit(SIGKILL);
62448+}
62449+#endif
62450+
62451+#ifdef CONFIG_PAX_USERCOPY
62452+
62453+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
62454+{
62455+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
62456+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
62457+#ifdef CONFIG_MODULES
62458+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
62459+#else
62460+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
62461+#endif
62462+
62463+#else
62464+ unsigned long textlow = (unsigned long)_stext;
62465+ unsigned long texthigh = (unsigned long)_etext;
62466+
62467+#ifdef CONFIG_X86_64
62468+ /* check against linear mapping as well */
62469+ if (high > (unsigned long)__va(__pa(textlow)) &&
62470+ low < (unsigned long)__va(__pa(texthigh)))
62471+ return true;
62472+#endif
62473+
62474+#endif
62475+
62476+ if (high <= textlow || low >= texthigh)
62477+ return false;
62478+ else
62479+ return true;
62480+}
62481+#endif
62482+
62483+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
62484+{
62485+#ifdef CONFIG_PAX_USERCOPY
62486+ const char *type;
62487+#endif
62488+
62489+#ifndef CONFIG_STACK_GROWSUP
62490+ unsigned long stackstart = (unsigned long)task_stack_page(current);
62491+ unsigned long currentsp = (unsigned long)&stackstart;
62492+ if (unlikely((currentsp < stackstart + 512 ||
62493+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
62494+ BUG();
62495+#endif
62496+
62497+#ifndef CONFIG_PAX_USERCOPY_DEBUG
62498+ if (const_size)
62499+ return;
62500+#endif
62501+
62502+#ifdef CONFIG_PAX_USERCOPY
62503+ if (!n)
62504+ return;
62505+
62506+ type = check_heap_object(ptr, n);
62507+ if (!type) {
62508+ int ret = check_stack_object(ptr, n);
62509+ if (ret == 1 || ret == 2)
62510+ return;
62511+ if (ret == 0) {
62512+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
62513+ type = "<kernel text>";
62514+ else
62515+ return;
62516+ } else
62517+ type = "<process stack>";
62518+ }
62519+
62520+ pax_report_usercopy(ptr, n, to_user, type);
62521+#endif
62522+
62523+}
62524+EXPORT_SYMBOL(__check_object_size);
62525+
62526+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62527+void pax_track_stack(void)
62528+{
62529+ unsigned long sp = (unsigned long)&sp;
62530+ if (sp < current_thread_info()->lowest_stack &&
62531+ sp > (unsigned long)task_stack_page(current))
62532+ current_thread_info()->lowest_stack = sp;
62533+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
62534+ BUG();
62535+}
62536+EXPORT_SYMBOL(pax_track_stack);
62537+#endif
62538+
62539+#ifdef CONFIG_PAX_SIZE_OVERFLOW
62540+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
62541+{
62542+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
62543+ dump_stack();
62544+ do_group_exit(SIGKILL);
62545+}
62546+EXPORT_SYMBOL(report_size_overflow);
62547+#endif
62548diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
62549index 9f9992b..8b59411 100644
62550--- a/fs/ext2/balloc.c
62551+++ b/fs/ext2/balloc.c
62552@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
62553
62554 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62555 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62556- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62557+ if (free_blocks < root_blocks + 1 &&
62558 !uid_eq(sbi->s_resuid, current_fsuid()) &&
62559 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62560- !in_group_p (sbi->s_resgid))) {
62561+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62562 return 0;
62563 }
62564 return 1;
62565diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
62566index 9142614..97484fa 100644
62567--- a/fs/ext2/xattr.c
62568+++ b/fs/ext2/xattr.c
62569@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
62570 struct buffer_head *bh = NULL;
62571 struct ext2_xattr_entry *entry;
62572 char *end;
62573- size_t rest = buffer_size;
62574+ size_t rest = buffer_size, total_size = 0;
62575 int error;
62576
62577 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
62578@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
62579 buffer += size;
62580 }
62581 rest -= size;
62582+ total_size += size;
62583 }
62584 }
62585- error = buffer_size - rest; /* total size */
62586+ error = total_size;
62587
62588 cleanup:
62589 brelse(bh);
62590diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
62591index 158b5d4..2432610 100644
62592--- a/fs/ext3/balloc.c
62593+++ b/fs/ext3/balloc.c
62594@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
62595
62596 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
62597 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
62598- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
62599+ if (free_blocks < root_blocks + 1 &&
62600 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
62601 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
62602- !in_group_p (sbi->s_resgid))) {
62603+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
62604 return 0;
62605 }
62606 return 1;
62607diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
62608index c6874be..f8a6ae8 100644
62609--- a/fs/ext3/xattr.c
62610+++ b/fs/ext3/xattr.c
62611@@ -330,7 +330,7 @@ static int
62612 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62613 char *buffer, size_t buffer_size)
62614 {
62615- size_t rest = buffer_size;
62616+ size_t rest = buffer_size, total_size = 0;
62617
62618 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
62619 const struct xattr_handler *handler =
62620@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
62621 buffer += size;
62622 }
62623 rest -= size;
62624+ total_size += size;
62625 }
62626 }
62627- return buffer_size - rest;
62628+ return total_size;
62629 }
62630
62631 static int
62632diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
62633index 581ef40..cec52d7 100644
62634--- a/fs/ext4/balloc.c
62635+++ b/fs/ext4/balloc.c
62636@@ -553,8 +553,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
62637 /* Hm, nope. Are (enough) root reserved clusters available? */
62638 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
62639 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
62640- capable(CAP_SYS_RESOURCE) ||
62641- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
62642+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
62643+ capable_nolog(CAP_SYS_RESOURCE)) {
62644
62645 if (free_clusters >= (nclusters + dirty_clusters +
62646 resv_clusters))
62647diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
62648index b0c225c..0e69bd7 100644
62649--- a/fs/ext4/ext4.h
62650+++ b/fs/ext4/ext4.h
62651@@ -1275,19 +1275,19 @@ struct ext4_sb_info {
62652 unsigned long s_mb_last_start;
62653
62654 /* stats for buddy allocator */
62655- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
62656- atomic_t s_bal_success; /* we found long enough chunks */
62657- atomic_t s_bal_allocated; /* in blocks */
62658- atomic_t s_bal_ex_scanned; /* total extents scanned */
62659- atomic_t s_bal_goals; /* goal hits */
62660- atomic_t s_bal_breaks; /* too long searches */
62661- atomic_t s_bal_2orders; /* 2^order hits */
62662+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
62663+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
62664+ atomic_unchecked_t s_bal_allocated; /* in blocks */
62665+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
62666+ atomic_unchecked_t s_bal_goals; /* goal hits */
62667+ atomic_unchecked_t s_bal_breaks; /* too long searches */
62668+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
62669 spinlock_t s_bal_lock;
62670 unsigned long s_mb_buddies_generated;
62671 unsigned long long s_mb_generation_time;
62672- atomic_t s_mb_lost_chunks;
62673- atomic_t s_mb_preallocated;
62674- atomic_t s_mb_discarded;
62675+ atomic_unchecked_t s_mb_lost_chunks;
62676+ atomic_unchecked_t s_mb_preallocated;
62677+ atomic_unchecked_t s_mb_discarded;
62678 atomic_t s_lock_busy;
62679
62680 /* locality groups */
62681diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
62682index 8b0f9ef..cb9f620 100644
62683--- a/fs/ext4/mballoc.c
62684+++ b/fs/ext4/mballoc.c
62685@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
62686 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
62687
62688 if (EXT4_SB(sb)->s_mb_stats)
62689- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
62690+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
62691
62692 break;
62693 }
62694@@ -2211,7 +2211,7 @@ repeat:
62695 ac->ac_status = AC_STATUS_CONTINUE;
62696 ac->ac_flags |= EXT4_MB_HINT_FIRST;
62697 cr = 3;
62698- atomic_inc(&sbi->s_mb_lost_chunks);
62699+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
62700 goto repeat;
62701 }
62702 }
62703@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
62704 if (sbi->s_mb_stats) {
62705 ext4_msg(sb, KERN_INFO,
62706 "mballoc: %u blocks %u reqs (%u success)",
62707- atomic_read(&sbi->s_bal_allocated),
62708- atomic_read(&sbi->s_bal_reqs),
62709- atomic_read(&sbi->s_bal_success));
62710+ atomic_read_unchecked(&sbi->s_bal_allocated),
62711+ atomic_read_unchecked(&sbi->s_bal_reqs),
62712+ atomic_read_unchecked(&sbi->s_bal_success));
62713 ext4_msg(sb, KERN_INFO,
62714 "mballoc: %u extents scanned, %u goal hits, "
62715 "%u 2^N hits, %u breaks, %u lost",
62716- atomic_read(&sbi->s_bal_ex_scanned),
62717- atomic_read(&sbi->s_bal_goals),
62718- atomic_read(&sbi->s_bal_2orders),
62719- atomic_read(&sbi->s_bal_breaks),
62720- atomic_read(&sbi->s_mb_lost_chunks));
62721+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
62722+ atomic_read_unchecked(&sbi->s_bal_goals),
62723+ atomic_read_unchecked(&sbi->s_bal_2orders),
62724+ atomic_read_unchecked(&sbi->s_bal_breaks),
62725+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
62726 ext4_msg(sb, KERN_INFO,
62727 "mballoc: %lu generated and it took %Lu",
62728 sbi->s_mb_buddies_generated,
62729 sbi->s_mb_generation_time);
62730 ext4_msg(sb, KERN_INFO,
62731 "mballoc: %u preallocated, %u discarded",
62732- atomic_read(&sbi->s_mb_preallocated),
62733- atomic_read(&sbi->s_mb_discarded));
62734+ atomic_read_unchecked(&sbi->s_mb_preallocated),
62735+ atomic_read_unchecked(&sbi->s_mb_discarded));
62736 }
62737
62738 free_percpu(sbi->s_locality_groups);
62739@@ -3192,16 +3192,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
62740 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
62741
62742 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
62743- atomic_inc(&sbi->s_bal_reqs);
62744- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62745+ atomic_inc_unchecked(&sbi->s_bal_reqs);
62746+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
62747 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
62748- atomic_inc(&sbi->s_bal_success);
62749- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
62750+ atomic_inc_unchecked(&sbi->s_bal_success);
62751+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
62752 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
62753 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
62754- atomic_inc(&sbi->s_bal_goals);
62755+ atomic_inc_unchecked(&sbi->s_bal_goals);
62756 if (ac->ac_found > sbi->s_mb_max_to_scan)
62757- atomic_inc(&sbi->s_bal_breaks);
62758+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62759 }
62760
62761 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62762@@ -3628,7 +3628,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62763 trace_ext4_mb_new_inode_pa(ac, pa);
62764
62765 ext4_mb_use_inode_pa(ac, pa);
62766- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62767+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62768
62769 ei = EXT4_I(ac->ac_inode);
62770 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62771@@ -3688,7 +3688,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62772 trace_ext4_mb_new_group_pa(ac, pa);
62773
62774 ext4_mb_use_group_pa(ac, pa);
62775- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62776+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62777
62778 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62779 lg = ac->ac_lg;
62780@@ -3777,7 +3777,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62781 * from the bitmap and continue.
62782 */
62783 }
62784- atomic_add(free, &sbi->s_mb_discarded);
62785+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62786
62787 return err;
62788 }
62789@@ -3795,7 +3795,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62790 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62791 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62792 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62793- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62794+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62795 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62796
62797 return 0;
62798diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62799index 32bce84..112d969 100644
62800--- a/fs/ext4/mmp.c
62801+++ b/fs/ext4/mmp.c
62802@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62803 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62804 const char *function, unsigned int line, const char *msg)
62805 {
62806- __ext4_warning(sb, function, line, msg);
62807+ __ext4_warning(sb, function, line, "%s", msg);
62808 __ext4_warning(sb, function, line,
62809 "MMP failure info: last update time: %llu, last update "
62810 "node: %s, last update device: %s\n",
62811diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62812index 0b28b36..b85d0f53 100644
62813--- a/fs/ext4/super.c
62814+++ b/fs/ext4/super.c
62815@@ -1276,7 +1276,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62816 }
62817
62818 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62819-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62820+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62821 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62822
62823 #ifdef CONFIG_QUOTA
62824@@ -2460,7 +2460,7 @@ struct ext4_attr {
62825 int offset;
62826 int deprecated_val;
62827 } u;
62828-};
62829+} __do_const;
62830
62831 static int parse_strtoull(const char *buf,
62832 unsigned long long max, unsigned long long *value)
62833diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62834index e738733..9843a6c 100644
62835--- a/fs/ext4/xattr.c
62836+++ b/fs/ext4/xattr.c
62837@@ -386,7 +386,7 @@ static int
62838 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62839 char *buffer, size_t buffer_size)
62840 {
62841- size_t rest = buffer_size;
62842+ size_t rest = buffer_size, total_size = 0;
62843
62844 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62845 const struct xattr_handler *handler =
62846@@ -403,9 +403,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62847 buffer += size;
62848 }
62849 rest -= size;
62850+ total_size += size;
62851 }
62852 }
62853- return buffer_size - rest;
62854+ return total_size;
62855 }
62856
62857 static int
62858diff --git a/fs/fcntl.c b/fs/fcntl.c
62859index 22d1c3d..600cf7e 100644
62860--- a/fs/fcntl.c
62861+++ b/fs/fcntl.c
62862@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62863 if (err)
62864 return err;
62865
62866+ if (gr_handle_chroot_fowner(pid, type))
62867+ return -ENOENT;
62868+ if (gr_check_protected_task_fowner(pid, type))
62869+ return -EACCES;
62870+
62871 f_modown(filp, pid, type, force);
62872 return 0;
62873 }
62874diff --git a/fs/fhandle.c b/fs/fhandle.c
62875index 999ff5c..ac037c9 100644
62876--- a/fs/fhandle.c
62877+++ b/fs/fhandle.c
62878@@ -8,6 +8,7 @@
62879 #include <linux/fs_struct.h>
62880 #include <linux/fsnotify.h>
62881 #include <linux/personality.h>
62882+#include <linux/grsecurity.h>
62883 #include <asm/uaccess.h>
62884 #include "internal.h"
62885 #include "mount.h"
62886@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62887 } else
62888 retval = 0;
62889 /* copy the mount id */
62890- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62891- sizeof(*mnt_id)) ||
62892+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62893 copy_to_user(ufh, handle,
62894 sizeof(struct file_handle) + handle_bytes))
62895 retval = -EFAULT;
62896@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62897 * the directory. Ideally we would like CAP_DAC_SEARCH.
62898 * But we don't have that
62899 */
62900- if (!capable(CAP_DAC_READ_SEARCH)) {
62901+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62902 retval = -EPERM;
62903 goto out_err;
62904 }
62905diff --git a/fs/file.c b/fs/file.c
62906index 66923fe..2849783 100644
62907--- a/fs/file.c
62908+++ b/fs/file.c
62909@@ -16,6 +16,7 @@
62910 #include <linux/slab.h>
62911 #include <linux/vmalloc.h>
62912 #include <linux/file.h>
62913+#include <linux/security.h>
62914 #include <linux/fdtable.h>
62915 #include <linux/bitops.h>
62916 #include <linux/interrupt.h>
62917@@ -139,7 +140,7 @@ out:
62918 * Return <0 error code on error; 1 on successful completion.
62919 * The files->file_lock should be held on entry, and will be held on exit.
62920 */
62921-static int expand_fdtable(struct files_struct *files, int nr)
62922+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62923 __releases(files->file_lock)
62924 __acquires(files->file_lock)
62925 {
62926@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62927 * expanded and execution may have blocked.
62928 * The files->file_lock should be held on entry, and will be held on exit.
62929 */
62930-static int expand_files(struct files_struct *files, int nr)
62931+static int expand_files(struct files_struct *files, unsigned int nr)
62932 {
62933 struct fdtable *fdt;
62934
62935@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62936 if (!file)
62937 return __close_fd(files, fd);
62938
62939+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62940 if (fd >= rlimit(RLIMIT_NOFILE))
62941 return -EBADF;
62942
62943@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62944 if (unlikely(oldfd == newfd))
62945 return -EINVAL;
62946
62947+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62948 if (newfd >= rlimit(RLIMIT_NOFILE))
62949 return -EBADF;
62950
62951@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62952 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62953 {
62954 int err;
62955+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62956 if (from >= rlimit(RLIMIT_NOFILE))
62957 return -EINVAL;
62958 err = alloc_fd(from, flags);
62959diff --git a/fs/filesystems.c b/fs/filesystems.c
62960index 5797d45..7d7d79a 100644
62961--- a/fs/filesystems.c
62962+++ b/fs/filesystems.c
62963@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62964 int len = dot ? dot - name : strlen(name);
62965
62966 fs = __get_fs_type(name, len);
62967+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62968+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62969+#else
62970 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62971+#endif
62972 fs = __get_fs_type(name, len);
62973
62974 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62975diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62976index 7dca743..543d620 100644
62977--- a/fs/fs_struct.c
62978+++ b/fs/fs_struct.c
62979@@ -4,6 +4,7 @@
62980 #include <linux/path.h>
62981 #include <linux/slab.h>
62982 #include <linux/fs_struct.h>
62983+#include <linux/grsecurity.h>
62984 #include "internal.h"
62985
62986 /*
62987@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62988 write_seqcount_begin(&fs->seq);
62989 old_root = fs->root;
62990 fs->root = *path;
62991+ gr_set_chroot_entries(current, path);
62992 write_seqcount_end(&fs->seq);
62993 spin_unlock(&fs->lock);
62994 if (old_root.dentry)
62995@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62996 int hits = 0;
62997 spin_lock(&fs->lock);
62998 write_seqcount_begin(&fs->seq);
62999+ /* this root replacement is only done by pivot_root,
63000+ leave grsec's chroot tagging alone for this task
63001+ so that a pivoted root isn't treated as a chroot
63002+ */
63003 hits += replace_path(&fs->root, old_root, new_root);
63004 hits += replace_path(&fs->pwd, old_root, new_root);
63005 write_seqcount_end(&fs->seq);
63006@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
63007 task_lock(tsk);
63008 spin_lock(&fs->lock);
63009 tsk->fs = NULL;
63010- kill = !--fs->users;
63011+ gr_clear_chroot_entries(tsk);
63012+ kill = !atomic_dec_return(&fs->users);
63013 spin_unlock(&fs->lock);
63014 task_unlock(tsk);
63015 if (kill)
63016@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63017 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
63018 /* We don't need to lock fs - think why ;-) */
63019 if (fs) {
63020- fs->users = 1;
63021+ atomic_set(&fs->users, 1);
63022 fs->in_exec = 0;
63023 spin_lock_init(&fs->lock);
63024 seqcount_init(&fs->seq);
63025@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
63026 spin_lock(&old->lock);
63027 fs->root = old->root;
63028 path_get(&fs->root);
63029+ /* instead of calling gr_set_chroot_entries here,
63030+ we call it from every caller of this function
63031+ */
63032 fs->pwd = old->pwd;
63033 path_get(&fs->pwd);
63034 spin_unlock(&old->lock);
63035@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
63036
63037 task_lock(current);
63038 spin_lock(&fs->lock);
63039- kill = !--fs->users;
63040+ kill = !atomic_dec_return(&fs->users);
63041 current->fs = new_fs;
63042+ gr_set_chroot_entries(current, &new_fs->root);
63043 spin_unlock(&fs->lock);
63044 task_unlock(current);
63045
63046@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
63047
63048 int current_umask(void)
63049 {
63050- return current->fs->umask;
63051+ return current->fs->umask | gr_acl_umask();
63052 }
63053 EXPORT_SYMBOL(current_umask);
63054
63055 /* to be mentioned only in INIT_TASK */
63056 struct fs_struct init_fs = {
63057- .users = 1,
63058+ .users = ATOMIC_INIT(1),
63059 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
63060 .seq = SEQCNT_ZERO(init_fs.seq),
63061 .umask = 0022,
63062diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
63063index 89acec7..a575262 100644
63064--- a/fs/fscache/cookie.c
63065+++ b/fs/fscache/cookie.c
63066@@ -19,7 +19,7 @@
63067
63068 struct kmem_cache *fscache_cookie_jar;
63069
63070-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
63071+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
63072
63073 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
63074 static int fscache_alloc_object(struct fscache_cache *cache,
63075@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
63076 parent ? (char *) parent->def->name : "<no-parent>",
63077 def->name, netfs_data, enable);
63078
63079- fscache_stat(&fscache_n_acquires);
63080+ fscache_stat_unchecked(&fscache_n_acquires);
63081
63082 /* if there's no parent cookie, then we don't create one here either */
63083 if (!parent) {
63084- fscache_stat(&fscache_n_acquires_null);
63085+ fscache_stat_unchecked(&fscache_n_acquires_null);
63086 _leave(" [no parent]");
63087 return NULL;
63088 }
63089@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63090 /* allocate and initialise a cookie */
63091 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
63092 if (!cookie) {
63093- fscache_stat(&fscache_n_acquires_oom);
63094+ fscache_stat_unchecked(&fscache_n_acquires_oom);
63095 _leave(" [ENOMEM]");
63096 return NULL;
63097 }
63098@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
63099
63100 switch (cookie->def->type) {
63101 case FSCACHE_COOKIE_TYPE_INDEX:
63102- fscache_stat(&fscache_n_cookie_index);
63103+ fscache_stat_unchecked(&fscache_n_cookie_index);
63104 break;
63105 case FSCACHE_COOKIE_TYPE_DATAFILE:
63106- fscache_stat(&fscache_n_cookie_data);
63107+ fscache_stat_unchecked(&fscache_n_cookie_data);
63108 break;
63109 default:
63110- fscache_stat(&fscache_n_cookie_special);
63111+ fscache_stat_unchecked(&fscache_n_cookie_special);
63112 break;
63113 }
63114
63115@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63116 } else {
63117 atomic_dec(&parent->n_children);
63118 __fscache_cookie_put(cookie);
63119- fscache_stat(&fscache_n_acquires_nobufs);
63120+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
63121 _leave(" = NULL");
63122 return NULL;
63123 }
63124@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
63125 }
63126 }
63127
63128- fscache_stat(&fscache_n_acquires_ok);
63129+ fscache_stat_unchecked(&fscache_n_acquires_ok);
63130 _leave(" = %p", cookie);
63131 return cookie;
63132 }
63133@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
63134 cache = fscache_select_cache_for_object(cookie->parent);
63135 if (!cache) {
63136 up_read(&fscache_addremove_sem);
63137- fscache_stat(&fscache_n_acquires_no_cache);
63138+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
63139 _leave(" = -ENOMEDIUM [no cache]");
63140 return -ENOMEDIUM;
63141 }
63142@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
63143 object = cache->ops->alloc_object(cache, cookie);
63144 fscache_stat_d(&fscache_n_cop_alloc_object);
63145 if (IS_ERR(object)) {
63146- fscache_stat(&fscache_n_object_no_alloc);
63147+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
63148 ret = PTR_ERR(object);
63149 goto error;
63150 }
63151
63152- fscache_stat(&fscache_n_object_alloc);
63153+ fscache_stat_unchecked(&fscache_n_object_alloc);
63154
63155- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
63156+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
63157
63158 _debug("ALLOC OBJ%x: %s {%lx}",
63159 object->debug_id, cookie->def->name, object->events);
63160@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
63161
63162 _enter("{%s}", cookie->def->name);
63163
63164- fscache_stat(&fscache_n_invalidates);
63165+ fscache_stat_unchecked(&fscache_n_invalidates);
63166
63167 /* Only permit invalidation of data files. Invalidating an index will
63168 * require the caller to release all its attachments to the tree rooted
63169@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
63170 {
63171 struct fscache_object *object;
63172
63173- fscache_stat(&fscache_n_updates);
63174+ fscache_stat_unchecked(&fscache_n_updates);
63175
63176 if (!cookie) {
63177- fscache_stat(&fscache_n_updates_null);
63178+ fscache_stat_unchecked(&fscache_n_updates_null);
63179 _leave(" [no cookie]");
63180 return;
63181 }
63182@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
63183 */
63184 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
63185 {
63186- fscache_stat(&fscache_n_relinquishes);
63187+ fscache_stat_unchecked(&fscache_n_relinquishes);
63188 if (retire)
63189- fscache_stat(&fscache_n_relinquishes_retire);
63190+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
63191
63192 if (!cookie) {
63193- fscache_stat(&fscache_n_relinquishes_null);
63194+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
63195 _leave(" [no cookie]");
63196 return;
63197 }
63198@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
63199 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
63200 goto inconsistent;
63201
63202- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
63203+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63204
63205 __fscache_use_cookie(cookie);
63206 if (fscache_submit_op(object, op) < 0)
63207diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
63208index 7872a62..d91b19f 100644
63209--- a/fs/fscache/internal.h
63210+++ b/fs/fscache/internal.h
63211@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
63212 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
63213 extern int fscache_wait_for_operation_activation(struct fscache_object *,
63214 struct fscache_operation *,
63215- atomic_t *,
63216- atomic_t *,
63217+ atomic_unchecked_t *,
63218+ atomic_unchecked_t *,
63219 void (*)(struct fscache_operation *));
63220 extern void fscache_invalidate_writes(struct fscache_cookie *);
63221
63222@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
63223 * stats.c
63224 */
63225 #ifdef CONFIG_FSCACHE_STATS
63226-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63227-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63228+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
63229+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
63230
63231-extern atomic_t fscache_n_op_pend;
63232-extern atomic_t fscache_n_op_run;
63233-extern atomic_t fscache_n_op_enqueue;
63234-extern atomic_t fscache_n_op_deferred_release;
63235-extern atomic_t fscache_n_op_release;
63236-extern atomic_t fscache_n_op_gc;
63237-extern atomic_t fscache_n_op_cancelled;
63238-extern atomic_t fscache_n_op_rejected;
63239+extern atomic_unchecked_t fscache_n_op_pend;
63240+extern atomic_unchecked_t fscache_n_op_run;
63241+extern atomic_unchecked_t fscache_n_op_enqueue;
63242+extern atomic_unchecked_t fscache_n_op_deferred_release;
63243+extern atomic_unchecked_t fscache_n_op_release;
63244+extern atomic_unchecked_t fscache_n_op_gc;
63245+extern atomic_unchecked_t fscache_n_op_cancelled;
63246+extern atomic_unchecked_t fscache_n_op_rejected;
63247
63248-extern atomic_t fscache_n_attr_changed;
63249-extern atomic_t fscache_n_attr_changed_ok;
63250-extern atomic_t fscache_n_attr_changed_nobufs;
63251-extern atomic_t fscache_n_attr_changed_nomem;
63252-extern atomic_t fscache_n_attr_changed_calls;
63253+extern atomic_unchecked_t fscache_n_attr_changed;
63254+extern atomic_unchecked_t fscache_n_attr_changed_ok;
63255+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
63256+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
63257+extern atomic_unchecked_t fscache_n_attr_changed_calls;
63258
63259-extern atomic_t fscache_n_allocs;
63260-extern atomic_t fscache_n_allocs_ok;
63261-extern atomic_t fscache_n_allocs_wait;
63262-extern atomic_t fscache_n_allocs_nobufs;
63263-extern atomic_t fscache_n_allocs_intr;
63264-extern atomic_t fscache_n_allocs_object_dead;
63265-extern atomic_t fscache_n_alloc_ops;
63266-extern atomic_t fscache_n_alloc_op_waits;
63267+extern atomic_unchecked_t fscache_n_allocs;
63268+extern atomic_unchecked_t fscache_n_allocs_ok;
63269+extern atomic_unchecked_t fscache_n_allocs_wait;
63270+extern atomic_unchecked_t fscache_n_allocs_nobufs;
63271+extern atomic_unchecked_t fscache_n_allocs_intr;
63272+extern atomic_unchecked_t fscache_n_allocs_object_dead;
63273+extern atomic_unchecked_t fscache_n_alloc_ops;
63274+extern atomic_unchecked_t fscache_n_alloc_op_waits;
63275
63276-extern atomic_t fscache_n_retrievals;
63277-extern atomic_t fscache_n_retrievals_ok;
63278-extern atomic_t fscache_n_retrievals_wait;
63279-extern atomic_t fscache_n_retrievals_nodata;
63280-extern atomic_t fscache_n_retrievals_nobufs;
63281-extern atomic_t fscache_n_retrievals_intr;
63282-extern atomic_t fscache_n_retrievals_nomem;
63283-extern atomic_t fscache_n_retrievals_object_dead;
63284-extern atomic_t fscache_n_retrieval_ops;
63285-extern atomic_t fscache_n_retrieval_op_waits;
63286+extern atomic_unchecked_t fscache_n_retrievals;
63287+extern atomic_unchecked_t fscache_n_retrievals_ok;
63288+extern atomic_unchecked_t fscache_n_retrievals_wait;
63289+extern atomic_unchecked_t fscache_n_retrievals_nodata;
63290+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
63291+extern atomic_unchecked_t fscache_n_retrievals_intr;
63292+extern atomic_unchecked_t fscache_n_retrievals_nomem;
63293+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
63294+extern atomic_unchecked_t fscache_n_retrieval_ops;
63295+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
63296
63297-extern atomic_t fscache_n_stores;
63298-extern atomic_t fscache_n_stores_ok;
63299-extern atomic_t fscache_n_stores_again;
63300-extern atomic_t fscache_n_stores_nobufs;
63301-extern atomic_t fscache_n_stores_oom;
63302-extern atomic_t fscache_n_store_ops;
63303-extern atomic_t fscache_n_store_calls;
63304-extern atomic_t fscache_n_store_pages;
63305-extern atomic_t fscache_n_store_radix_deletes;
63306-extern atomic_t fscache_n_store_pages_over_limit;
63307+extern atomic_unchecked_t fscache_n_stores;
63308+extern atomic_unchecked_t fscache_n_stores_ok;
63309+extern atomic_unchecked_t fscache_n_stores_again;
63310+extern atomic_unchecked_t fscache_n_stores_nobufs;
63311+extern atomic_unchecked_t fscache_n_stores_oom;
63312+extern atomic_unchecked_t fscache_n_store_ops;
63313+extern atomic_unchecked_t fscache_n_store_calls;
63314+extern atomic_unchecked_t fscache_n_store_pages;
63315+extern atomic_unchecked_t fscache_n_store_radix_deletes;
63316+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
63317
63318-extern atomic_t fscache_n_store_vmscan_not_storing;
63319-extern atomic_t fscache_n_store_vmscan_gone;
63320-extern atomic_t fscache_n_store_vmscan_busy;
63321-extern atomic_t fscache_n_store_vmscan_cancelled;
63322-extern atomic_t fscache_n_store_vmscan_wait;
63323+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63324+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
63325+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
63326+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63327+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
63328
63329-extern atomic_t fscache_n_marks;
63330-extern atomic_t fscache_n_uncaches;
63331+extern atomic_unchecked_t fscache_n_marks;
63332+extern atomic_unchecked_t fscache_n_uncaches;
63333
63334-extern atomic_t fscache_n_acquires;
63335-extern atomic_t fscache_n_acquires_null;
63336-extern atomic_t fscache_n_acquires_no_cache;
63337-extern atomic_t fscache_n_acquires_ok;
63338-extern atomic_t fscache_n_acquires_nobufs;
63339-extern atomic_t fscache_n_acquires_oom;
63340+extern atomic_unchecked_t fscache_n_acquires;
63341+extern atomic_unchecked_t fscache_n_acquires_null;
63342+extern atomic_unchecked_t fscache_n_acquires_no_cache;
63343+extern atomic_unchecked_t fscache_n_acquires_ok;
63344+extern atomic_unchecked_t fscache_n_acquires_nobufs;
63345+extern atomic_unchecked_t fscache_n_acquires_oom;
63346
63347-extern atomic_t fscache_n_invalidates;
63348-extern atomic_t fscache_n_invalidates_run;
63349+extern atomic_unchecked_t fscache_n_invalidates;
63350+extern atomic_unchecked_t fscache_n_invalidates_run;
63351
63352-extern atomic_t fscache_n_updates;
63353-extern atomic_t fscache_n_updates_null;
63354-extern atomic_t fscache_n_updates_run;
63355+extern atomic_unchecked_t fscache_n_updates;
63356+extern atomic_unchecked_t fscache_n_updates_null;
63357+extern atomic_unchecked_t fscache_n_updates_run;
63358
63359-extern atomic_t fscache_n_relinquishes;
63360-extern atomic_t fscache_n_relinquishes_null;
63361-extern atomic_t fscache_n_relinquishes_waitcrt;
63362-extern atomic_t fscache_n_relinquishes_retire;
63363+extern atomic_unchecked_t fscache_n_relinquishes;
63364+extern atomic_unchecked_t fscache_n_relinquishes_null;
63365+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63366+extern atomic_unchecked_t fscache_n_relinquishes_retire;
63367
63368-extern atomic_t fscache_n_cookie_index;
63369-extern atomic_t fscache_n_cookie_data;
63370-extern atomic_t fscache_n_cookie_special;
63371+extern atomic_unchecked_t fscache_n_cookie_index;
63372+extern atomic_unchecked_t fscache_n_cookie_data;
63373+extern atomic_unchecked_t fscache_n_cookie_special;
63374
63375-extern atomic_t fscache_n_object_alloc;
63376-extern atomic_t fscache_n_object_no_alloc;
63377-extern atomic_t fscache_n_object_lookups;
63378-extern atomic_t fscache_n_object_lookups_negative;
63379-extern atomic_t fscache_n_object_lookups_positive;
63380-extern atomic_t fscache_n_object_lookups_timed_out;
63381-extern atomic_t fscache_n_object_created;
63382-extern atomic_t fscache_n_object_avail;
63383-extern atomic_t fscache_n_object_dead;
63384+extern atomic_unchecked_t fscache_n_object_alloc;
63385+extern atomic_unchecked_t fscache_n_object_no_alloc;
63386+extern atomic_unchecked_t fscache_n_object_lookups;
63387+extern atomic_unchecked_t fscache_n_object_lookups_negative;
63388+extern atomic_unchecked_t fscache_n_object_lookups_positive;
63389+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
63390+extern atomic_unchecked_t fscache_n_object_created;
63391+extern atomic_unchecked_t fscache_n_object_avail;
63392+extern atomic_unchecked_t fscache_n_object_dead;
63393
63394-extern atomic_t fscache_n_checkaux_none;
63395-extern atomic_t fscache_n_checkaux_okay;
63396-extern atomic_t fscache_n_checkaux_update;
63397-extern atomic_t fscache_n_checkaux_obsolete;
63398+extern atomic_unchecked_t fscache_n_checkaux_none;
63399+extern atomic_unchecked_t fscache_n_checkaux_okay;
63400+extern atomic_unchecked_t fscache_n_checkaux_update;
63401+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
63402
63403 extern atomic_t fscache_n_cop_alloc_object;
63404 extern atomic_t fscache_n_cop_lookup_object;
63405@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
63406 atomic_inc(stat);
63407 }
63408
63409+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
63410+{
63411+ atomic_inc_unchecked(stat);
63412+}
63413+
63414 static inline void fscache_stat_d(atomic_t *stat)
63415 {
63416 atomic_dec(stat);
63417@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
63418
63419 #define __fscache_stat(stat) (NULL)
63420 #define fscache_stat(stat) do {} while (0)
63421+#define fscache_stat_unchecked(stat) do {} while (0)
63422 #define fscache_stat_d(stat) do {} while (0)
63423 #endif
63424
63425diff --git a/fs/fscache/object.c b/fs/fscache/object.c
63426index da032da..0076ce7 100644
63427--- a/fs/fscache/object.c
63428+++ b/fs/fscache/object.c
63429@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63430 _debug("LOOKUP \"%s\" in \"%s\"",
63431 cookie->def->name, object->cache->tag->name);
63432
63433- fscache_stat(&fscache_n_object_lookups);
63434+ fscache_stat_unchecked(&fscache_n_object_lookups);
63435 fscache_stat(&fscache_n_cop_lookup_object);
63436 ret = object->cache->ops->lookup_object(object);
63437 fscache_stat_d(&fscache_n_cop_lookup_object);
63438@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
63439 if (ret == -ETIMEDOUT) {
63440 /* probably stuck behind another object, so move this one to
63441 * the back of the queue */
63442- fscache_stat(&fscache_n_object_lookups_timed_out);
63443+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
63444 _leave(" [timeout]");
63445 return NO_TRANSIT;
63446 }
63447@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
63448 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
63449
63450 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63451- fscache_stat(&fscache_n_object_lookups_negative);
63452+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
63453
63454 /* Allow write requests to begin stacking up and read requests to begin
63455 * returning ENODATA.
63456@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
63457 /* if we were still looking up, then we must have a positive lookup
63458 * result, in which case there may be data available */
63459 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
63460- fscache_stat(&fscache_n_object_lookups_positive);
63461+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
63462
63463 /* We do (presumably) have data */
63464 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
63465@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
63466 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
63467 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
63468 } else {
63469- fscache_stat(&fscache_n_object_created);
63470+ fscache_stat_unchecked(&fscache_n_object_created);
63471 }
63472
63473 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
63474@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
63475 fscache_stat_d(&fscache_n_cop_lookup_complete);
63476
63477 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
63478- fscache_stat(&fscache_n_object_avail);
63479+ fscache_stat_unchecked(&fscache_n_object_avail);
63480
63481 _leave("");
63482 return transit_to(JUMPSTART_DEPS);
63483@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
63484
63485 /* this just shifts the object release to the work processor */
63486 fscache_put_object(object);
63487- fscache_stat(&fscache_n_object_dead);
63488+ fscache_stat_unchecked(&fscache_n_object_dead);
63489
63490 _leave("");
63491 return transit_to(OBJECT_DEAD);
63492@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63493 enum fscache_checkaux result;
63494
63495 if (!object->cookie->def->check_aux) {
63496- fscache_stat(&fscache_n_checkaux_none);
63497+ fscache_stat_unchecked(&fscache_n_checkaux_none);
63498 return FSCACHE_CHECKAUX_OKAY;
63499 }
63500
63501@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
63502 switch (result) {
63503 /* entry okay as is */
63504 case FSCACHE_CHECKAUX_OKAY:
63505- fscache_stat(&fscache_n_checkaux_okay);
63506+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
63507 break;
63508
63509 /* entry requires update */
63510 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
63511- fscache_stat(&fscache_n_checkaux_update);
63512+ fscache_stat_unchecked(&fscache_n_checkaux_update);
63513 break;
63514
63515 /* entry requires deletion */
63516 case FSCACHE_CHECKAUX_OBSOLETE:
63517- fscache_stat(&fscache_n_checkaux_obsolete);
63518+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
63519 break;
63520
63521 default:
63522@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
63523 {
63524 const struct fscache_state *s;
63525
63526- fscache_stat(&fscache_n_invalidates_run);
63527+ fscache_stat_unchecked(&fscache_n_invalidates_run);
63528 fscache_stat(&fscache_n_cop_invalidate_object);
63529 s = _fscache_invalidate_object(object, event);
63530 fscache_stat_d(&fscache_n_cop_invalidate_object);
63531@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
63532 {
63533 _enter("{OBJ%x},%d", object->debug_id, event);
63534
63535- fscache_stat(&fscache_n_updates_run);
63536+ fscache_stat_unchecked(&fscache_n_updates_run);
63537 fscache_stat(&fscache_n_cop_update_object);
63538 object->cache->ops->update_object(object);
63539 fscache_stat_d(&fscache_n_cop_update_object);
63540diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
63541index e7b87a0..a85d47a 100644
63542--- a/fs/fscache/operation.c
63543+++ b/fs/fscache/operation.c
63544@@ -17,7 +17,7 @@
63545 #include <linux/slab.h>
63546 #include "internal.h"
63547
63548-atomic_t fscache_op_debug_id;
63549+atomic_unchecked_t fscache_op_debug_id;
63550 EXPORT_SYMBOL(fscache_op_debug_id);
63551
63552 /**
63553@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
63554 ASSERTCMP(atomic_read(&op->usage), >, 0);
63555 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
63556
63557- fscache_stat(&fscache_n_op_enqueue);
63558+ fscache_stat_unchecked(&fscache_n_op_enqueue);
63559 switch (op->flags & FSCACHE_OP_TYPE) {
63560 case FSCACHE_OP_ASYNC:
63561 _debug("queue async");
63562@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
63563 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
63564 if (op->processor)
63565 fscache_enqueue_operation(op);
63566- fscache_stat(&fscache_n_op_run);
63567+ fscache_stat_unchecked(&fscache_n_op_run);
63568 }
63569
63570 /*
63571@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63572 if (object->n_in_progress > 0) {
63573 atomic_inc(&op->usage);
63574 list_add_tail(&op->pend_link, &object->pending_ops);
63575- fscache_stat(&fscache_n_op_pend);
63576+ fscache_stat_unchecked(&fscache_n_op_pend);
63577 } else if (!list_empty(&object->pending_ops)) {
63578 atomic_inc(&op->usage);
63579 list_add_tail(&op->pend_link, &object->pending_ops);
63580- fscache_stat(&fscache_n_op_pend);
63581+ fscache_stat_unchecked(&fscache_n_op_pend);
63582 fscache_start_operations(object);
63583 } else {
63584 ASSERTCMP(object->n_in_progress, ==, 0);
63585@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
63586 object->n_exclusive++; /* reads and writes must wait */
63587 atomic_inc(&op->usage);
63588 list_add_tail(&op->pend_link, &object->pending_ops);
63589- fscache_stat(&fscache_n_op_pend);
63590+ fscache_stat_unchecked(&fscache_n_op_pend);
63591 ret = 0;
63592 } else {
63593 /* If we're in any other state, there must have been an I/O
63594@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
63595 if (object->n_exclusive > 0) {
63596 atomic_inc(&op->usage);
63597 list_add_tail(&op->pend_link, &object->pending_ops);
63598- fscache_stat(&fscache_n_op_pend);
63599+ fscache_stat_unchecked(&fscache_n_op_pend);
63600 } else if (!list_empty(&object->pending_ops)) {
63601 atomic_inc(&op->usage);
63602 list_add_tail(&op->pend_link, &object->pending_ops);
63603- fscache_stat(&fscache_n_op_pend);
63604+ fscache_stat_unchecked(&fscache_n_op_pend);
63605 fscache_start_operations(object);
63606 } else {
63607 ASSERTCMP(object->n_exclusive, ==, 0);
63608@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
63609 object->n_ops++;
63610 atomic_inc(&op->usage);
63611 list_add_tail(&op->pend_link, &object->pending_ops);
63612- fscache_stat(&fscache_n_op_pend);
63613+ fscache_stat_unchecked(&fscache_n_op_pend);
63614 ret = 0;
63615 } else if (fscache_object_is_dying(object)) {
63616- fscache_stat(&fscache_n_op_rejected);
63617+ fscache_stat_unchecked(&fscache_n_op_rejected);
63618 op->state = FSCACHE_OP_ST_CANCELLED;
63619 ret = -ENOBUFS;
63620 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
63621@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
63622 ret = -EBUSY;
63623 if (op->state == FSCACHE_OP_ST_PENDING) {
63624 ASSERT(!list_empty(&op->pend_link));
63625- fscache_stat(&fscache_n_op_cancelled);
63626+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63627 list_del_init(&op->pend_link);
63628 if (do_cancel)
63629 do_cancel(op);
63630@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
63631 while (!list_empty(&object->pending_ops)) {
63632 op = list_entry(object->pending_ops.next,
63633 struct fscache_operation, pend_link);
63634- fscache_stat(&fscache_n_op_cancelled);
63635+ fscache_stat_unchecked(&fscache_n_op_cancelled);
63636 list_del_init(&op->pend_link);
63637
63638 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
63639@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
63640 op->state, ==, FSCACHE_OP_ST_CANCELLED);
63641 op->state = FSCACHE_OP_ST_DEAD;
63642
63643- fscache_stat(&fscache_n_op_release);
63644+ fscache_stat_unchecked(&fscache_n_op_release);
63645
63646 if (op->release) {
63647 op->release(op);
63648@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
63649 * lock, and defer it otherwise */
63650 if (!spin_trylock(&object->lock)) {
63651 _debug("defer put");
63652- fscache_stat(&fscache_n_op_deferred_release);
63653+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
63654
63655 cache = object->cache;
63656 spin_lock(&cache->op_gc_list_lock);
63657@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
63658
63659 _debug("GC DEFERRED REL OBJ%x OP%x",
63660 object->debug_id, op->debug_id);
63661- fscache_stat(&fscache_n_op_gc);
63662+ fscache_stat_unchecked(&fscache_n_op_gc);
63663
63664 ASSERTCMP(atomic_read(&op->usage), ==, 0);
63665 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
63666diff --git a/fs/fscache/page.c b/fs/fscache/page.c
63667index de33b3f..8be4d29 100644
63668--- a/fs/fscache/page.c
63669+++ b/fs/fscache/page.c
63670@@ -74,7 +74,7 @@ try_again:
63671 val = radix_tree_lookup(&cookie->stores, page->index);
63672 if (!val) {
63673 rcu_read_unlock();
63674- fscache_stat(&fscache_n_store_vmscan_not_storing);
63675+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
63676 __fscache_uncache_page(cookie, page);
63677 return true;
63678 }
63679@@ -104,11 +104,11 @@ try_again:
63680 spin_unlock(&cookie->stores_lock);
63681
63682 if (xpage) {
63683- fscache_stat(&fscache_n_store_vmscan_cancelled);
63684- fscache_stat(&fscache_n_store_radix_deletes);
63685+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
63686+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63687 ASSERTCMP(xpage, ==, page);
63688 } else {
63689- fscache_stat(&fscache_n_store_vmscan_gone);
63690+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63691 }
63692
63693 wake_up_bit(&cookie->flags, 0);
63694@@ -123,11 +123,11 @@ page_busy:
63695 * sleeping on memory allocation, so we may need to impose a timeout
63696 * too. */
63697 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63698- fscache_stat(&fscache_n_store_vmscan_busy);
63699+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63700 return false;
63701 }
63702
63703- fscache_stat(&fscache_n_store_vmscan_wait);
63704+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63705 if (!release_page_wait_timeout(cookie, page))
63706 _debug("fscache writeout timeout page: %p{%lx}",
63707 page, page->index);
63708@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63709 FSCACHE_COOKIE_STORING_TAG);
63710 if (!radix_tree_tag_get(&cookie->stores, page->index,
63711 FSCACHE_COOKIE_PENDING_TAG)) {
63712- fscache_stat(&fscache_n_store_radix_deletes);
63713+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63714 xpage = radix_tree_delete(&cookie->stores, page->index);
63715 }
63716 spin_unlock(&cookie->stores_lock);
63717@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63718
63719 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63720
63721- fscache_stat(&fscache_n_attr_changed_calls);
63722+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63723
63724 if (fscache_object_is_active(object)) {
63725 fscache_stat(&fscache_n_cop_attr_changed);
63726@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63727
63728 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63729
63730- fscache_stat(&fscache_n_attr_changed);
63731+ fscache_stat_unchecked(&fscache_n_attr_changed);
63732
63733 op = kzalloc(sizeof(*op), GFP_KERNEL);
63734 if (!op) {
63735- fscache_stat(&fscache_n_attr_changed_nomem);
63736+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63737 _leave(" = -ENOMEM");
63738 return -ENOMEM;
63739 }
63740@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63741 if (fscache_submit_exclusive_op(object, op) < 0)
63742 goto nobufs_dec;
63743 spin_unlock(&cookie->lock);
63744- fscache_stat(&fscache_n_attr_changed_ok);
63745+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63746 fscache_put_operation(op);
63747 _leave(" = 0");
63748 return 0;
63749@@ -242,7 +242,7 @@ nobufs:
63750 kfree(op);
63751 if (wake_cookie)
63752 __fscache_wake_unused_cookie(cookie);
63753- fscache_stat(&fscache_n_attr_changed_nobufs);
63754+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63755 _leave(" = %d", -ENOBUFS);
63756 return -ENOBUFS;
63757 }
63758@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63759 /* allocate a retrieval operation and attempt to submit it */
63760 op = kzalloc(sizeof(*op), GFP_NOIO);
63761 if (!op) {
63762- fscache_stat(&fscache_n_retrievals_nomem);
63763+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63764 return NULL;
63765 }
63766
63767@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63768 return 0;
63769 }
63770
63771- fscache_stat(&fscache_n_retrievals_wait);
63772+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63773
63774 jif = jiffies;
63775 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63776 TASK_INTERRUPTIBLE) != 0) {
63777- fscache_stat(&fscache_n_retrievals_intr);
63778+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63779 _leave(" = -ERESTARTSYS");
63780 return -ERESTARTSYS;
63781 }
63782@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63783 */
63784 int fscache_wait_for_operation_activation(struct fscache_object *object,
63785 struct fscache_operation *op,
63786- atomic_t *stat_op_waits,
63787- atomic_t *stat_object_dead,
63788+ atomic_unchecked_t *stat_op_waits,
63789+ atomic_unchecked_t *stat_object_dead,
63790 void (*do_cancel)(struct fscache_operation *))
63791 {
63792 int ret;
63793@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63794
63795 _debug(">>> WT");
63796 if (stat_op_waits)
63797- fscache_stat(stat_op_waits);
63798+ fscache_stat_unchecked(stat_op_waits);
63799 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63800 TASK_INTERRUPTIBLE) != 0) {
63801 ret = fscache_cancel_op(op, do_cancel);
63802@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63803 check_if_dead:
63804 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63805 if (stat_object_dead)
63806- fscache_stat(stat_object_dead);
63807+ fscache_stat_unchecked(stat_object_dead);
63808 _leave(" = -ENOBUFS [cancelled]");
63809 return -ENOBUFS;
63810 }
63811@@ -381,7 +381,7 @@ check_if_dead:
63812 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63813 fscache_cancel_op(op, do_cancel);
63814 if (stat_object_dead)
63815- fscache_stat(stat_object_dead);
63816+ fscache_stat_unchecked(stat_object_dead);
63817 return -ENOBUFS;
63818 }
63819 return 0;
63820@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63821
63822 _enter("%p,%p,,,", cookie, page);
63823
63824- fscache_stat(&fscache_n_retrievals);
63825+ fscache_stat_unchecked(&fscache_n_retrievals);
63826
63827 if (hlist_empty(&cookie->backing_objects))
63828 goto nobufs;
63829@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63830 goto nobufs_unlock_dec;
63831 spin_unlock(&cookie->lock);
63832
63833- fscache_stat(&fscache_n_retrieval_ops);
63834+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63835
63836 /* pin the netfs read context in case we need to do the actual netfs
63837 * read because we've encountered a cache read failure */
63838@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63839
63840 error:
63841 if (ret == -ENOMEM)
63842- fscache_stat(&fscache_n_retrievals_nomem);
63843+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63844 else if (ret == -ERESTARTSYS)
63845- fscache_stat(&fscache_n_retrievals_intr);
63846+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63847 else if (ret == -ENODATA)
63848- fscache_stat(&fscache_n_retrievals_nodata);
63849+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63850 else if (ret < 0)
63851- fscache_stat(&fscache_n_retrievals_nobufs);
63852+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63853 else
63854- fscache_stat(&fscache_n_retrievals_ok);
63855+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63856
63857 fscache_put_retrieval(op);
63858 _leave(" = %d", ret);
63859@@ -505,7 +505,7 @@ nobufs_unlock:
63860 __fscache_wake_unused_cookie(cookie);
63861 kfree(op);
63862 nobufs:
63863- fscache_stat(&fscache_n_retrievals_nobufs);
63864+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63865 _leave(" = -ENOBUFS");
63866 return -ENOBUFS;
63867 }
63868@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63869
63870 _enter("%p,,%d,,,", cookie, *nr_pages);
63871
63872- fscache_stat(&fscache_n_retrievals);
63873+ fscache_stat_unchecked(&fscache_n_retrievals);
63874
63875 if (hlist_empty(&cookie->backing_objects))
63876 goto nobufs;
63877@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63878 goto nobufs_unlock_dec;
63879 spin_unlock(&cookie->lock);
63880
63881- fscache_stat(&fscache_n_retrieval_ops);
63882+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63883
63884 /* pin the netfs read context in case we need to do the actual netfs
63885 * read because we've encountered a cache read failure */
63886@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63887
63888 error:
63889 if (ret == -ENOMEM)
63890- fscache_stat(&fscache_n_retrievals_nomem);
63891+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63892 else if (ret == -ERESTARTSYS)
63893- fscache_stat(&fscache_n_retrievals_intr);
63894+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63895 else if (ret == -ENODATA)
63896- fscache_stat(&fscache_n_retrievals_nodata);
63897+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63898 else if (ret < 0)
63899- fscache_stat(&fscache_n_retrievals_nobufs);
63900+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63901 else
63902- fscache_stat(&fscache_n_retrievals_ok);
63903+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63904
63905 fscache_put_retrieval(op);
63906 _leave(" = %d", ret);
63907@@ -636,7 +636,7 @@ nobufs_unlock:
63908 if (wake_cookie)
63909 __fscache_wake_unused_cookie(cookie);
63910 nobufs:
63911- fscache_stat(&fscache_n_retrievals_nobufs);
63912+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63913 _leave(" = -ENOBUFS");
63914 return -ENOBUFS;
63915 }
63916@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63917
63918 _enter("%p,%p,,,", cookie, page);
63919
63920- fscache_stat(&fscache_n_allocs);
63921+ fscache_stat_unchecked(&fscache_n_allocs);
63922
63923 if (hlist_empty(&cookie->backing_objects))
63924 goto nobufs;
63925@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63926 goto nobufs_unlock_dec;
63927 spin_unlock(&cookie->lock);
63928
63929- fscache_stat(&fscache_n_alloc_ops);
63930+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63931
63932 ret = fscache_wait_for_operation_activation(
63933 object, &op->op,
63934@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63935
63936 error:
63937 if (ret == -ERESTARTSYS)
63938- fscache_stat(&fscache_n_allocs_intr);
63939+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63940 else if (ret < 0)
63941- fscache_stat(&fscache_n_allocs_nobufs);
63942+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63943 else
63944- fscache_stat(&fscache_n_allocs_ok);
63945+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63946
63947 fscache_put_retrieval(op);
63948 _leave(" = %d", ret);
63949@@ -730,7 +730,7 @@ nobufs_unlock:
63950 if (wake_cookie)
63951 __fscache_wake_unused_cookie(cookie);
63952 nobufs:
63953- fscache_stat(&fscache_n_allocs_nobufs);
63954+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63955 _leave(" = -ENOBUFS");
63956 return -ENOBUFS;
63957 }
63958@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63959
63960 spin_lock(&cookie->stores_lock);
63961
63962- fscache_stat(&fscache_n_store_calls);
63963+ fscache_stat_unchecked(&fscache_n_store_calls);
63964
63965 /* find a page to store */
63966 page = NULL;
63967@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63968 page = results[0];
63969 _debug("gang %d [%lx]", n, page->index);
63970 if (page->index > op->store_limit) {
63971- fscache_stat(&fscache_n_store_pages_over_limit);
63972+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63973 goto superseded;
63974 }
63975
63976@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63977 spin_unlock(&cookie->stores_lock);
63978 spin_unlock(&object->lock);
63979
63980- fscache_stat(&fscache_n_store_pages);
63981+ fscache_stat_unchecked(&fscache_n_store_pages);
63982 fscache_stat(&fscache_n_cop_write_page);
63983 ret = object->cache->ops->write_page(op, page);
63984 fscache_stat_d(&fscache_n_cop_write_page);
63985@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63986 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63987 ASSERT(PageFsCache(page));
63988
63989- fscache_stat(&fscache_n_stores);
63990+ fscache_stat_unchecked(&fscache_n_stores);
63991
63992 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63993 _leave(" = -ENOBUFS [invalidating]");
63994@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63995 spin_unlock(&cookie->stores_lock);
63996 spin_unlock(&object->lock);
63997
63998- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63999+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
64000 op->store_limit = object->store_limit;
64001
64002 __fscache_use_cookie(cookie);
64003@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64004
64005 spin_unlock(&cookie->lock);
64006 radix_tree_preload_end();
64007- fscache_stat(&fscache_n_store_ops);
64008- fscache_stat(&fscache_n_stores_ok);
64009+ fscache_stat_unchecked(&fscache_n_store_ops);
64010+ fscache_stat_unchecked(&fscache_n_stores_ok);
64011
64012 /* the work queue now carries its own ref on the object */
64013 fscache_put_operation(&op->op);
64014@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
64015 return 0;
64016
64017 already_queued:
64018- fscache_stat(&fscache_n_stores_again);
64019+ fscache_stat_unchecked(&fscache_n_stores_again);
64020 already_pending:
64021 spin_unlock(&cookie->stores_lock);
64022 spin_unlock(&object->lock);
64023 spin_unlock(&cookie->lock);
64024 radix_tree_preload_end();
64025 kfree(op);
64026- fscache_stat(&fscache_n_stores_ok);
64027+ fscache_stat_unchecked(&fscache_n_stores_ok);
64028 _leave(" = 0");
64029 return 0;
64030
64031@@ -1039,14 +1039,14 @@ nobufs:
64032 kfree(op);
64033 if (wake_cookie)
64034 __fscache_wake_unused_cookie(cookie);
64035- fscache_stat(&fscache_n_stores_nobufs);
64036+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
64037 _leave(" = -ENOBUFS");
64038 return -ENOBUFS;
64039
64040 nomem_free:
64041 kfree(op);
64042 nomem:
64043- fscache_stat(&fscache_n_stores_oom);
64044+ fscache_stat_unchecked(&fscache_n_stores_oom);
64045 _leave(" = -ENOMEM");
64046 return -ENOMEM;
64047 }
64048@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
64049 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
64050 ASSERTCMP(page, !=, NULL);
64051
64052- fscache_stat(&fscache_n_uncaches);
64053+ fscache_stat_unchecked(&fscache_n_uncaches);
64054
64055 /* cache withdrawal may beat us to it */
64056 if (!PageFsCache(page))
64057@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
64058 struct fscache_cookie *cookie = op->op.object->cookie;
64059
64060 #ifdef CONFIG_FSCACHE_STATS
64061- atomic_inc(&fscache_n_marks);
64062+ atomic_inc_unchecked(&fscache_n_marks);
64063 #endif
64064
64065 _debug("- mark %p{%lx}", page, page->index);
64066diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
64067index 40d13c7..ddf52b9 100644
64068--- a/fs/fscache/stats.c
64069+++ b/fs/fscache/stats.c
64070@@ -18,99 +18,99 @@
64071 /*
64072 * operation counters
64073 */
64074-atomic_t fscache_n_op_pend;
64075-atomic_t fscache_n_op_run;
64076-atomic_t fscache_n_op_enqueue;
64077-atomic_t fscache_n_op_requeue;
64078-atomic_t fscache_n_op_deferred_release;
64079-atomic_t fscache_n_op_release;
64080-atomic_t fscache_n_op_gc;
64081-atomic_t fscache_n_op_cancelled;
64082-atomic_t fscache_n_op_rejected;
64083+atomic_unchecked_t fscache_n_op_pend;
64084+atomic_unchecked_t fscache_n_op_run;
64085+atomic_unchecked_t fscache_n_op_enqueue;
64086+atomic_unchecked_t fscache_n_op_requeue;
64087+atomic_unchecked_t fscache_n_op_deferred_release;
64088+atomic_unchecked_t fscache_n_op_release;
64089+atomic_unchecked_t fscache_n_op_gc;
64090+atomic_unchecked_t fscache_n_op_cancelled;
64091+atomic_unchecked_t fscache_n_op_rejected;
64092
64093-atomic_t fscache_n_attr_changed;
64094-atomic_t fscache_n_attr_changed_ok;
64095-atomic_t fscache_n_attr_changed_nobufs;
64096-atomic_t fscache_n_attr_changed_nomem;
64097-atomic_t fscache_n_attr_changed_calls;
64098+atomic_unchecked_t fscache_n_attr_changed;
64099+atomic_unchecked_t fscache_n_attr_changed_ok;
64100+atomic_unchecked_t fscache_n_attr_changed_nobufs;
64101+atomic_unchecked_t fscache_n_attr_changed_nomem;
64102+atomic_unchecked_t fscache_n_attr_changed_calls;
64103
64104-atomic_t fscache_n_allocs;
64105-atomic_t fscache_n_allocs_ok;
64106-atomic_t fscache_n_allocs_wait;
64107-atomic_t fscache_n_allocs_nobufs;
64108-atomic_t fscache_n_allocs_intr;
64109-atomic_t fscache_n_allocs_object_dead;
64110-atomic_t fscache_n_alloc_ops;
64111-atomic_t fscache_n_alloc_op_waits;
64112+atomic_unchecked_t fscache_n_allocs;
64113+atomic_unchecked_t fscache_n_allocs_ok;
64114+atomic_unchecked_t fscache_n_allocs_wait;
64115+atomic_unchecked_t fscache_n_allocs_nobufs;
64116+atomic_unchecked_t fscache_n_allocs_intr;
64117+atomic_unchecked_t fscache_n_allocs_object_dead;
64118+atomic_unchecked_t fscache_n_alloc_ops;
64119+atomic_unchecked_t fscache_n_alloc_op_waits;
64120
64121-atomic_t fscache_n_retrievals;
64122-atomic_t fscache_n_retrievals_ok;
64123-atomic_t fscache_n_retrievals_wait;
64124-atomic_t fscache_n_retrievals_nodata;
64125-atomic_t fscache_n_retrievals_nobufs;
64126-atomic_t fscache_n_retrievals_intr;
64127-atomic_t fscache_n_retrievals_nomem;
64128-atomic_t fscache_n_retrievals_object_dead;
64129-atomic_t fscache_n_retrieval_ops;
64130-atomic_t fscache_n_retrieval_op_waits;
64131+atomic_unchecked_t fscache_n_retrievals;
64132+atomic_unchecked_t fscache_n_retrievals_ok;
64133+atomic_unchecked_t fscache_n_retrievals_wait;
64134+atomic_unchecked_t fscache_n_retrievals_nodata;
64135+atomic_unchecked_t fscache_n_retrievals_nobufs;
64136+atomic_unchecked_t fscache_n_retrievals_intr;
64137+atomic_unchecked_t fscache_n_retrievals_nomem;
64138+atomic_unchecked_t fscache_n_retrievals_object_dead;
64139+atomic_unchecked_t fscache_n_retrieval_ops;
64140+atomic_unchecked_t fscache_n_retrieval_op_waits;
64141
64142-atomic_t fscache_n_stores;
64143-atomic_t fscache_n_stores_ok;
64144-atomic_t fscache_n_stores_again;
64145-atomic_t fscache_n_stores_nobufs;
64146-atomic_t fscache_n_stores_oom;
64147-atomic_t fscache_n_store_ops;
64148-atomic_t fscache_n_store_calls;
64149-atomic_t fscache_n_store_pages;
64150-atomic_t fscache_n_store_radix_deletes;
64151-atomic_t fscache_n_store_pages_over_limit;
64152+atomic_unchecked_t fscache_n_stores;
64153+atomic_unchecked_t fscache_n_stores_ok;
64154+atomic_unchecked_t fscache_n_stores_again;
64155+atomic_unchecked_t fscache_n_stores_nobufs;
64156+atomic_unchecked_t fscache_n_stores_oom;
64157+atomic_unchecked_t fscache_n_store_ops;
64158+atomic_unchecked_t fscache_n_store_calls;
64159+atomic_unchecked_t fscache_n_store_pages;
64160+atomic_unchecked_t fscache_n_store_radix_deletes;
64161+atomic_unchecked_t fscache_n_store_pages_over_limit;
64162
64163-atomic_t fscache_n_store_vmscan_not_storing;
64164-atomic_t fscache_n_store_vmscan_gone;
64165-atomic_t fscache_n_store_vmscan_busy;
64166-atomic_t fscache_n_store_vmscan_cancelled;
64167-atomic_t fscache_n_store_vmscan_wait;
64168+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
64169+atomic_unchecked_t fscache_n_store_vmscan_gone;
64170+atomic_unchecked_t fscache_n_store_vmscan_busy;
64171+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
64172+atomic_unchecked_t fscache_n_store_vmscan_wait;
64173
64174-atomic_t fscache_n_marks;
64175-atomic_t fscache_n_uncaches;
64176+atomic_unchecked_t fscache_n_marks;
64177+atomic_unchecked_t fscache_n_uncaches;
64178
64179-atomic_t fscache_n_acquires;
64180-atomic_t fscache_n_acquires_null;
64181-atomic_t fscache_n_acquires_no_cache;
64182-atomic_t fscache_n_acquires_ok;
64183-atomic_t fscache_n_acquires_nobufs;
64184-atomic_t fscache_n_acquires_oom;
64185+atomic_unchecked_t fscache_n_acquires;
64186+atomic_unchecked_t fscache_n_acquires_null;
64187+atomic_unchecked_t fscache_n_acquires_no_cache;
64188+atomic_unchecked_t fscache_n_acquires_ok;
64189+atomic_unchecked_t fscache_n_acquires_nobufs;
64190+atomic_unchecked_t fscache_n_acquires_oom;
64191
64192-atomic_t fscache_n_invalidates;
64193-atomic_t fscache_n_invalidates_run;
64194+atomic_unchecked_t fscache_n_invalidates;
64195+atomic_unchecked_t fscache_n_invalidates_run;
64196
64197-atomic_t fscache_n_updates;
64198-atomic_t fscache_n_updates_null;
64199-atomic_t fscache_n_updates_run;
64200+atomic_unchecked_t fscache_n_updates;
64201+atomic_unchecked_t fscache_n_updates_null;
64202+atomic_unchecked_t fscache_n_updates_run;
64203
64204-atomic_t fscache_n_relinquishes;
64205-atomic_t fscache_n_relinquishes_null;
64206-atomic_t fscache_n_relinquishes_waitcrt;
64207-atomic_t fscache_n_relinquishes_retire;
64208+atomic_unchecked_t fscache_n_relinquishes;
64209+atomic_unchecked_t fscache_n_relinquishes_null;
64210+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
64211+atomic_unchecked_t fscache_n_relinquishes_retire;
64212
64213-atomic_t fscache_n_cookie_index;
64214-atomic_t fscache_n_cookie_data;
64215-atomic_t fscache_n_cookie_special;
64216+atomic_unchecked_t fscache_n_cookie_index;
64217+atomic_unchecked_t fscache_n_cookie_data;
64218+atomic_unchecked_t fscache_n_cookie_special;
64219
64220-atomic_t fscache_n_object_alloc;
64221-atomic_t fscache_n_object_no_alloc;
64222-atomic_t fscache_n_object_lookups;
64223-atomic_t fscache_n_object_lookups_negative;
64224-atomic_t fscache_n_object_lookups_positive;
64225-atomic_t fscache_n_object_lookups_timed_out;
64226-atomic_t fscache_n_object_created;
64227-atomic_t fscache_n_object_avail;
64228-atomic_t fscache_n_object_dead;
64229+atomic_unchecked_t fscache_n_object_alloc;
64230+atomic_unchecked_t fscache_n_object_no_alloc;
64231+atomic_unchecked_t fscache_n_object_lookups;
64232+atomic_unchecked_t fscache_n_object_lookups_negative;
64233+atomic_unchecked_t fscache_n_object_lookups_positive;
64234+atomic_unchecked_t fscache_n_object_lookups_timed_out;
64235+atomic_unchecked_t fscache_n_object_created;
64236+atomic_unchecked_t fscache_n_object_avail;
64237+atomic_unchecked_t fscache_n_object_dead;
64238
64239-atomic_t fscache_n_checkaux_none;
64240-atomic_t fscache_n_checkaux_okay;
64241-atomic_t fscache_n_checkaux_update;
64242-atomic_t fscache_n_checkaux_obsolete;
64243+atomic_unchecked_t fscache_n_checkaux_none;
64244+atomic_unchecked_t fscache_n_checkaux_okay;
64245+atomic_unchecked_t fscache_n_checkaux_update;
64246+atomic_unchecked_t fscache_n_checkaux_obsolete;
64247
64248 atomic_t fscache_n_cop_alloc_object;
64249 atomic_t fscache_n_cop_lookup_object;
64250@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
64251 seq_puts(m, "FS-Cache statistics\n");
64252
64253 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
64254- atomic_read(&fscache_n_cookie_index),
64255- atomic_read(&fscache_n_cookie_data),
64256- atomic_read(&fscache_n_cookie_special));
64257+ atomic_read_unchecked(&fscache_n_cookie_index),
64258+ atomic_read_unchecked(&fscache_n_cookie_data),
64259+ atomic_read_unchecked(&fscache_n_cookie_special));
64260
64261 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
64262- atomic_read(&fscache_n_object_alloc),
64263- atomic_read(&fscache_n_object_no_alloc),
64264- atomic_read(&fscache_n_object_avail),
64265- atomic_read(&fscache_n_object_dead));
64266+ atomic_read_unchecked(&fscache_n_object_alloc),
64267+ atomic_read_unchecked(&fscache_n_object_no_alloc),
64268+ atomic_read_unchecked(&fscache_n_object_avail),
64269+ atomic_read_unchecked(&fscache_n_object_dead));
64270 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
64271- atomic_read(&fscache_n_checkaux_none),
64272- atomic_read(&fscache_n_checkaux_okay),
64273- atomic_read(&fscache_n_checkaux_update),
64274- atomic_read(&fscache_n_checkaux_obsolete));
64275+ atomic_read_unchecked(&fscache_n_checkaux_none),
64276+ atomic_read_unchecked(&fscache_n_checkaux_okay),
64277+ atomic_read_unchecked(&fscache_n_checkaux_update),
64278+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
64279
64280 seq_printf(m, "Pages : mrk=%u unc=%u\n",
64281- atomic_read(&fscache_n_marks),
64282- atomic_read(&fscache_n_uncaches));
64283+ atomic_read_unchecked(&fscache_n_marks),
64284+ atomic_read_unchecked(&fscache_n_uncaches));
64285
64286 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
64287 " oom=%u\n",
64288- atomic_read(&fscache_n_acquires),
64289- atomic_read(&fscache_n_acquires_null),
64290- atomic_read(&fscache_n_acquires_no_cache),
64291- atomic_read(&fscache_n_acquires_ok),
64292- atomic_read(&fscache_n_acquires_nobufs),
64293- atomic_read(&fscache_n_acquires_oom));
64294+ atomic_read_unchecked(&fscache_n_acquires),
64295+ atomic_read_unchecked(&fscache_n_acquires_null),
64296+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
64297+ atomic_read_unchecked(&fscache_n_acquires_ok),
64298+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
64299+ atomic_read_unchecked(&fscache_n_acquires_oom));
64300
64301 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
64302- atomic_read(&fscache_n_object_lookups),
64303- atomic_read(&fscache_n_object_lookups_negative),
64304- atomic_read(&fscache_n_object_lookups_positive),
64305- atomic_read(&fscache_n_object_created),
64306- atomic_read(&fscache_n_object_lookups_timed_out));
64307+ atomic_read_unchecked(&fscache_n_object_lookups),
64308+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
64309+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
64310+ atomic_read_unchecked(&fscache_n_object_created),
64311+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
64312
64313 seq_printf(m, "Invals : n=%u run=%u\n",
64314- atomic_read(&fscache_n_invalidates),
64315- atomic_read(&fscache_n_invalidates_run));
64316+ atomic_read_unchecked(&fscache_n_invalidates),
64317+ atomic_read_unchecked(&fscache_n_invalidates_run));
64318
64319 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
64320- atomic_read(&fscache_n_updates),
64321- atomic_read(&fscache_n_updates_null),
64322- atomic_read(&fscache_n_updates_run));
64323+ atomic_read_unchecked(&fscache_n_updates),
64324+ atomic_read_unchecked(&fscache_n_updates_null),
64325+ atomic_read_unchecked(&fscache_n_updates_run));
64326
64327 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
64328- atomic_read(&fscache_n_relinquishes),
64329- atomic_read(&fscache_n_relinquishes_null),
64330- atomic_read(&fscache_n_relinquishes_waitcrt),
64331- atomic_read(&fscache_n_relinquishes_retire));
64332+ atomic_read_unchecked(&fscache_n_relinquishes),
64333+ atomic_read_unchecked(&fscache_n_relinquishes_null),
64334+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
64335+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
64336
64337 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
64338- atomic_read(&fscache_n_attr_changed),
64339- atomic_read(&fscache_n_attr_changed_ok),
64340- atomic_read(&fscache_n_attr_changed_nobufs),
64341- atomic_read(&fscache_n_attr_changed_nomem),
64342- atomic_read(&fscache_n_attr_changed_calls));
64343+ atomic_read_unchecked(&fscache_n_attr_changed),
64344+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
64345+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
64346+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
64347+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
64348
64349 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
64350- atomic_read(&fscache_n_allocs),
64351- atomic_read(&fscache_n_allocs_ok),
64352- atomic_read(&fscache_n_allocs_wait),
64353- atomic_read(&fscache_n_allocs_nobufs),
64354- atomic_read(&fscache_n_allocs_intr));
64355+ atomic_read_unchecked(&fscache_n_allocs),
64356+ atomic_read_unchecked(&fscache_n_allocs_ok),
64357+ atomic_read_unchecked(&fscache_n_allocs_wait),
64358+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
64359+ atomic_read_unchecked(&fscache_n_allocs_intr));
64360 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
64361- atomic_read(&fscache_n_alloc_ops),
64362- atomic_read(&fscache_n_alloc_op_waits),
64363- atomic_read(&fscache_n_allocs_object_dead));
64364+ atomic_read_unchecked(&fscache_n_alloc_ops),
64365+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
64366+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
64367
64368 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
64369 " int=%u oom=%u\n",
64370- atomic_read(&fscache_n_retrievals),
64371- atomic_read(&fscache_n_retrievals_ok),
64372- atomic_read(&fscache_n_retrievals_wait),
64373- atomic_read(&fscache_n_retrievals_nodata),
64374- atomic_read(&fscache_n_retrievals_nobufs),
64375- atomic_read(&fscache_n_retrievals_intr),
64376- atomic_read(&fscache_n_retrievals_nomem));
64377+ atomic_read_unchecked(&fscache_n_retrievals),
64378+ atomic_read_unchecked(&fscache_n_retrievals_ok),
64379+ atomic_read_unchecked(&fscache_n_retrievals_wait),
64380+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
64381+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
64382+ atomic_read_unchecked(&fscache_n_retrievals_intr),
64383+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
64384 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
64385- atomic_read(&fscache_n_retrieval_ops),
64386- atomic_read(&fscache_n_retrieval_op_waits),
64387- atomic_read(&fscache_n_retrievals_object_dead));
64388+ atomic_read_unchecked(&fscache_n_retrieval_ops),
64389+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
64390+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
64391
64392 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
64393- atomic_read(&fscache_n_stores),
64394- atomic_read(&fscache_n_stores_ok),
64395- atomic_read(&fscache_n_stores_again),
64396- atomic_read(&fscache_n_stores_nobufs),
64397- atomic_read(&fscache_n_stores_oom));
64398+ atomic_read_unchecked(&fscache_n_stores),
64399+ atomic_read_unchecked(&fscache_n_stores_ok),
64400+ atomic_read_unchecked(&fscache_n_stores_again),
64401+ atomic_read_unchecked(&fscache_n_stores_nobufs),
64402+ atomic_read_unchecked(&fscache_n_stores_oom));
64403 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
64404- atomic_read(&fscache_n_store_ops),
64405- atomic_read(&fscache_n_store_calls),
64406- atomic_read(&fscache_n_store_pages),
64407- atomic_read(&fscache_n_store_radix_deletes),
64408- atomic_read(&fscache_n_store_pages_over_limit));
64409+ atomic_read_unchecked(&fscache_n_store_ops),
64410+ atomic_read_unchecked(&fscache_n_store_calls),
64411+ atomic_read_unchecked(&fscache_n_store_pages),
64412+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
64413+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
64414
64415 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
64416- atomic_read(&fscache_n_store_vmscan_not_storing),
64417- atomic_read(&fscache_n_store_vmscan_gone),
64418- atomic_read(&fscache_n_store_vmscan_busy),
64419- atomic_read(&fscache_n_store_vmscan_cancelled),
64420- atomic_read(&fscache_n_store_vmscan_wait));
64421+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
64422+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
64423+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
64424+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
64425+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
64426
64427 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
64428- atomic_read(&fscache_n_op_pend),
64429- atomic_read(&fscache_n_op_run),
64430- atomic_read(&fscache_n_op_enqueue),
64431- atomic_read(&fscache_n_op_cancelled),
64432- atomic_read(&fscache_n_op_rejected));
64433+ atomic_read_unchecked(&fscache_n_op_pend),
64434+ atomic_read_unchecked(&fscache_n_op_run),
64435+ atomic_read_unchecked(&fscache_n_op_enqueue),
64436+ atomic_read_unchecked(&fscache_n_op_cancelled),
64437+ atomic_read_unchecked(&fscache_n_op_rejected));
64438 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
64439- atomic_read(&fscache_n_op_deferred_release),
64440- atomic_read(&fscache_n_op_release),
64441- atomic_read(&fscache_n_op_gc));
64442+ atomic_read_unchecked(&fscache_n_op_deferred_release),
64443+ atomic_read_unchecked(&fscache_n_op_release),
64444+ atomic_read_unchecked(&fscache_n_op_gc));
64445
64446 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
64447 atomic_read(&fscache_n_cop_alloc_object),
64448diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
64449index 966ace8..030a03a 100644
64450--- a/fs/fuse/cuse.c
64451+++ b/fs/fuse/cuse.c
64452@@ -611,10 +611,12 @@ static int __init cuse_init(void)
64453 INIT_LIST_HEAD(&cuse_conntbl[i]);
64454
64455 /* inherit and extend fuse_dev_operations */
64456- cuse_channel_fops = fuse_dev_operations;
64457- cuse_channel_fops.owner = THIS_MODULE;
64458- cuse_channel_fops.open = cuse_channel_open;
64459- cuse_channel_fops.release = cuse_channel_release;
64460+ pax_open_kernel();
64461+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
64462+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
64463+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
64464+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
64465+ pax_close_kernel();
64466
64467 cuse_class = class_create(THIS_MODULE, "cuse");
64468 if (IS_ERR(cuse_class))
64469diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
64470index ca88731..8e9c55d 100644
64471--- a/fs/fuse/dev.c
64472+++ b/fs/fuse/dev.c
64473@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64474 ret = 0;
64475 pipe_lock(pipe);
64476
64477- if (!pipe->readers) {
64478+ if (!atomic_read(&pipe->readers)) {
64479 send_sig(SIGPIPE, current, 0);
64480 if (!ret)
64481 ret = -EPIPE;
64482@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
64483 page_nr++;
64484 ret += buf->len;
64485
64486- if (pipe->files)
64487+ if (atomic_read(&pipe->files))
64488 do_wakeup = 1;
64489 }
64490
64491diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
64492index de1d84a..fd69c0c 100644
64493--- a/fs/fuse/dir.c
64494+++ b/fs/fuse/dir.c
64495@@ -1479,7 +1479,7 @@ static char *read_link(struct dentry *dentry)
64496 return link;
64497 }
64498
64499-static void free_link(char *link)
64500+static void free_link(const char *link)
64501 {
64502 if (!IS_ERR(link))
64503 free_page((unsigned long) link);
64504diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
64505index fd62cae..3494dfa 100644
64506--- a/fs/hostfs/hostfs_kern.c
64507+++ b/fs/hostfs/hostfs_kern.c
64508@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64509
64510 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
64511 {
64512- char *s = nd_get_link(nd);
64513+ const char *s = nd_get_link(nd);
64514 if (!IS_ERR(s))
64515 __putname(s);
64516 }
64517diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
64518index 1e2872b..7aea000 100644
64519--- a/fs/hugetlbfs/inode.c
64520+++ b/fs/hugetlbfs/inode.c
64521@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64522 struct mm_struct *mm = current->mm;
64523 struct vm_area_struct *vma;
64524 struct hstate *h = hstate_file(file);
64525+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
64526 struct vm_unmapped_area_info info;
64527
64528 if (len & ~huge_page_mask(h))
64529@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
64530 return addr;
64531 }
64532
64533+#ifdef CONFIG_PAX_RANDMMAP
64534+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64535+#endif
64536+
64537 if (addr) {
64538 addr = ALIGN(addr, huge_page_size(h));
64539 vma = find_vma(mm, addr);
64540- if (TASK_SIZE - len >= addr &&
64541- (!vma || addr + len <= vma->vm_start))
64542+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
64543 return addr;
64544 }
64545
64546 info.flags = 0;
64547 info.length = len;
64548 info.low_limit = TASK_UNMAPPED_BASE;
64549+
64550+#ifdef CONFIG_PAX_RANDMMAP
64551+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64552+ info.low_limit += mm->delta_mmap;
64553+#endif
64554+
64555 info.high_limit = TASK_SIZE;
64556 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
64557 info.align_offset = 0;
64558@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
64559 };
64560 MODULE_ALIAS_FS("hugetlbfs");
64561
64562-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64563+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
64564
64565 static int can_do_hugetlb_shm(void)
64566 {
64567diff --git a/fs/inode.c b/fs/inode.c
64568index 26753ba..d19eb34 100644
64569--- a/fs/inode.c
64570+++ b/fs/inode.c
64571@@ -840,16 +840,20 @@ unsigned int get_next_ino(void)
64572 unsigned int *p = &get_cpu_var(last_ino);
64573 unsigned int res = *p;
64574
64575+start:
64576+
64577 #ifdef CONFIG_SMP
64578 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
64579- static atomic_t shared_last_ino;
64580- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
64581+ static atomic_unchecked_t shared_last_ino;
64582+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
64583
64584 res = next - LAST_INO_BATCH;
64585 }
64586 #endif
64587
64588- *p = ++res;
64589+ if (unlikely(!++res))
64590+ goto start; /* never zero */
64591+ *p = res;
64592 put_cpu_var(last_ino);
64593 return res;
64594 }
64595diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
64596index 4a6cf28..d3a29d3 100644
64597--- a/fs/jffs2/erase.c
64598+++ b/fs/jffs2/erase.c
64599@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
64600 struct jffs2_unknown_node marker = {
64601 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
64602 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64603- .totlen = cpu_to_je32(c->cleanmarker_size)
64604+ .totlen = cpu_to_je32(c->cleanmarker_size),
64605+ .hdr_crc = cpu_to_je32(0)
64606 };
64607
64608 jffs2_prealloc_raw_node_refs(c, jeb, 1);
64609diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
64610index a6597d6..41b30ec 100644
64611--- a/fs/jffs2/wbuf.c
64612+++ b/fs/jffs2/wbuf.c
64613@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
64614 {
64615 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
64616 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
64617- .totlen = constant_cpu_to_je32(8)
64618+ .totlen = constant_cpu_to_je32(8),
64619+ .hdr_crc = constant_cpu_to_je32(0)
64620 };
64621
64622 /*
64623diff --git a/fs/jfs/super.c b/fs/jfs/super.c
64624index adf8cb0..bb935fa 100644
64625--- a/fs/jfs/super.c
64626+++ b/fs/jfs/super.c
64627@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
64628
64629 jfs_inode_cachep =
64630 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
64631- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
64632+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
64633 init_once);
64634 if (jfs_inode_cachep == NULL)
64635 return -ENOMEM;
64636diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
64637index a693f5b..82276a1 100644
64638--- a/fs/kernfs/dir.c
64639+++ b/fs/kernfs/dir.c
64640@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
64641 *
64642 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64643 */
64644-static unsigned int kernfs_name_hash(const char *name, const void *ns)
64645+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
64646 {
64647 unsigned long hash = init_name_hash();
64648 unsigned int len = strlen(name);
64649diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
64650index 4429d6d..9831f52 100644
64651--- a/fs/kernfs/file.c
64652+++ b/fs/kernfs/file.c
64653@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
64654
64655 struct kernfs_open_node {
64656 atomic_t refcnt;
64657- atomic_t event;
64658+ atomic_unchecked_t event;
64659 wait_queue_head_t poll;
64660 struct list_head files; /* goes through kernfs_open_file.list */
64661 };
64662@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
64663 {
64664 struct kernfs_open_file *of = sf->private;
64665
64666- of->event = atomic_read(&of->kn->attr.open->event);
64667+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
64668
64669 return of->kn->attr.ops->seq_show(sf, v);
64670 }
64671@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
64672 return ret;
64673 }
64674
64675-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64676- void *buf, int len, int write)
64677+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
64678+ void *buf, size_t len, int write)
64679 {
64680 struct file *file = vma->vm_file;
64681 struct kernfs_open_file *of = kernfs_of(file);
64682- int ret;
64683+ ssize_t ret;
64684
64685 if (!of->vm_ops)
64686 return -EINVAL;
64687@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
64688 return -ENOMEM;
64689
64690 atomic_set(&new_on->refcnt, 0);
64691- atomic_set(&new_on->event, 1);
64692+ atomic_set_unchecked(&new_on->event, 1);
64693 init_waitqueue_head(&new_on->poll);
64694 INIT_LIST_HEAD(&new_on->files);
64695 goto retry;
64696@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64697
64698 kernfs_put_active(kn);
64699
64700- if (of->event != atomic_read(&on->event))
64701+ if (of->event != atomic_read_unchecked(&on->event))
64702 goto trigger;
64703
64704 return DEFAULT_POLLMASK;
64705@@ -818,7 +818,7 @@ repeat:
64706
64707 on = kn->attr.open;
64708 if (on) {
64709- atomic_inc(&on->event);
64710+ atomic_inc_unchecked(&on->event);
64711 wake_up_interruptible(&on->poll);
64712 }
64713
64714diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64715index 8a19889..4c3069a 100644
64716--- a/fs/kernfs/symlink.c
64717+++ b/fs/kernfs/symlink.c
64718@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64719 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64720 void *cookie)
64721 {
64722- char *page = nd_get_link(nd);
64723+ const char *page = nd_get_link(nd);
64724 if (!IS_ERR(page))
64725 free_page((unsigned long)page);
64726 }
64727diff --git a/fs/libfs.c b/fs/libfs.c
64728index 88e3e00..979c262 100644
64729--- a/fs/libfs.c
64730+++ b/fs/libfs.c
64731@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64732
64733 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64734 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
64735+ char d_name[sizeof(next->d_iname)];
64736+ const unsigned char *name;
64737+
64738 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64739 if (!simple_positive(next)) {
64740 spin_unlock(&next->d_lock);
64741@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64742
64743 spin_unlock(&next->d_lock);
64744 spin_unlock(&dentry->d_lock);
64745- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64746+ name = next->d_name.name;
64747+ if (name == next->d_iname) {
64748+ memcpy(d_name, name, next->d_name.len);
64749+ name = d_name;
64750+ }
64751+ if (!dir_emit(ctx, name, next->d_name.len,
64752 next->d_inode->i_ino, dt_type(next->d_inode)))
64753 return 0;
64754 spin_lock(&dentry->d_lock);
64755@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64756 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64757 void *cookie)
64758 {
64759- char *s = nd_get_link(nd);
64760+ const char *s = nd_get_link(nd);
64761 if (!IS_ERR(s))
64762 kfree(s);
64763 }
64764diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64765index acd3947..1f896e2 100644
64766--- a/fs/lockd/clntproc.c
64767+++ b/fs/lockd/clntproc.c
64768@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64769 /*
64770 * Cookie counter for NLM requests
64771 */
64772-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64773+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64774
64775 void nlmclnt_next_cookie(struct nlm_cookie *c)
64776 {
64777- u32 cookie = atomic_inc_return(&nlm_cookie);
64778+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64779
64780 memcpy(c->data, &cookie, 4);
64781 c->len=4;
64782diff --git a/fs/locks.c b/fs/locks.c
64783index bb08857..f65e8bf 100644
64784--- a/fs/locks.c
64785+++ b/fs/locks.c
64786@@ -2350,7 +2350,7 @@ void locks_remove_file(struct file *filp)
64787 locks_remove_posix(filp, filp);
64788
64789 if (filp->f_op->flock) {
64790- struct file_lock fl = {
64791+ struct file_lock flock = {
64792 .fl_owner = filp,
64793 .fl_pid = current->tgid,
64794 .fl_file = filp,
64795@@ -2358,9 +2358,9 @@ void locks_remove_file(struct file *filp)
64796 .fl_type = F_UNLCK,
64797 .fl_end = OFFSET_MAX,
64798 };
64799- filp->f_op->flock(filp, F_SETLKW, &fl);
64800- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64801- fl.fl_ops->fl_release_private(&fl);
64802+ filp->f_op->flock(filp, F_SETLKW, &flock);
64803+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64804+ flock.fl_ops->fl_release_private(&flock);
64805 }
64806
64807 spin_lock(&inode->i_lock);
64808diff --git a/fs/mount.h b/fs/mount.h
64809index 6740a62..ccb472f 100644
64810--- a/fs/mount.h
64811+++ b/fs/mount.h
64812@@ -11,7 +11,7 @@ struct mnt_namespace {
64813 u64 seq; /* Sequence number to prevent loops */
64814 wait_queue_head_t poll;
64815 u64 event;
64816-};
64817+} __randomize_layout;
64818
64819 struct mnt_pcp {
64820 int mnt_count;
64821@@ -57,7 +57,7 @@ struct mount {
64822 int mnt_expiry_mark; /* true if marked for expiry */
64823 struct hlist_head mnt_pins;
64824 struct path mnt_ex_mountpoint;
64825-};
64826+} __randomize_layout;
64827
64828 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64829
64830diff --git a/fs/namei.c b/fs/namei.c
64831index 3ddb044..5533df9 100644
64832--- a/fs/namei.c
64833+++ b/fs/namei.c
64834@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
64835 if (ret != -EACCES)
64836 return ret;
64837
64838+#ifdef CONFIG_GRKERNSEC
64839+ /* we'll block if we have to log due to a denied capability use */
64840+ if (mask & MAY_NOT_BLOCK)
64841+ return -ECHILD;
64842+#endif
64843+
64844 if (S_ISDIR(inode->i_mode)) {
64845 /* DACs are overridable for directories */
64846- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64847- return 0;
64848 if (!(mask & MAY_WRITE))
64849- if (capable_wrt_inode_uidgid(inode,
64850- CAP_DAC_READ_SEARCH))
64851+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64852+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64853 return 0;
64854+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64855+ return 0;
64856 return -EACCES;
64857 }
64858 /*
64859+ * Searching includes executable on directories, else just read.
64860+ */
64861+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64862+ if (mask == MAY_READ)
64863+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64864+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64865+ return 0;
64866+
64867+ /*
64868 * Read/write DACs are always overridable.
64869 * Executable DACs are overridable when there is
64870 * at least one exec bit set.
64871@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
64872 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64873 return 0;
64874
64875- /*
64876- * Searching includes executable on directories, else just read.
64877- */
64878- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64879- if (mask == MAY_READ)
64880- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64881- return 0;
64882-
64883 return -EACCES;
64884 }
64885 EXPORT_SYMBOL(generic_permission);
64886@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64887 {
64888 struct dentry *dentry = link->dentry;
64889 int error;
64890- char *s;
64891+ const char *s;
64892
64893 BUG_ON(nd->flags & LOOKUP_RCU);
64894
64895@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64896 if (error)
64897 goto out_put_nd_path;
64898
64899+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64900+ dentry->d_inode, dentry, nd->path.mnt)) {
64901+ error = -EACCES;
64902+ goto out_put_nd_path;
64903+ }
64904+
64905 nd->last_type = LAST_BIND;
64906 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64907 error = PTR_ERR(*p);
64908@@ -1607,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64909 if (res)
64910 break;
64911 res = walk_component(nd, path, LOOKUP_FOLLOW);
64912+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64913+ res = -EACCES;
64914 put_link(nd, &link, cookie);
64915 } while (res > 0);
64916
64917@@ -1679,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
64918 static inline u64 hash_name(const char *name)
64919 {
64920 unsigned long a, b, adata, bdata, mask, hash, len;
64921- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64922+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64923
64924 hash = a = 0;
64925 len = -sizeof(unsigned long);
64926@@ -1968,6 +1983,8 @@ static int path_lookupat(int dfd, const char *name,
64927 if (err)
64928 break;
64929 err = lookup_last(nd, &path);
64930+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64931+ err = -EACCES;
64932 put_link(nd, &link, cookie);
64933 }
64934 }
64935@@ -1975,6 +1992,13 @@ static int path_lookupat(int dfd, const char *name,
64936 if (!err)
64937 err = complete_walk(nd);
64938
64939+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64940+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64941+ path_put(&nd->path);
64942+ err = -ENOENT;
64943+ }
64944+ }
64945+
64946 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64947 if (!d_can_lookup(nd->path.dentry)) {
64948 path_put(&nd->path);
64949@@ -2002,8 +2026,15 @@ static int filename_lookup(int dfd, struct filename *name,
64950 retval = path_lookupat(dfd, name->name,
64951 flags | LOOKUP_REVAL, nd);
64952
64953- if (likely(!retval))
64954+ if (likely(!retval)) {
64955 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64956+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64957+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64958+ path_put(&nd->path);
64959+ return -ENOENT;
64960+ }
64961+ }
64962+ }
64963 return retval;
64964 }
64965
64966@@ -2585,6 +2616,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64967 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64968 return -EPERM;
64969
64970+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64971+ return -EPERM;
64972+ if (gr_handle_rawio(inode))
64973+ return -EPERM;
64974+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64975+ return -EACCES;
64976+
64977 return 0;
64978 }
64979
64980@@ -2816,7 +2854,7 @@ looked_up:
64981 * cleared otherwise prior to returning.
64982 */
64983 static int lookup_open(struct nameidata *nd, struct path *path,
64984- struct file *file,
64985+ struct path *link, struct file *file,
64986 const struct open_flags *op,
64987 bool got_write, int *opened)
64988 {
64989@@ -2851,6 +2889,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64990 /* Negative dentry, just create the file */
64991 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64992 umode_t mode = op->mode;
64993+
64994+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64995+ error = -EACCES;
64996+ goto out_dput;
64997+ }
64998+
64999+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
65000+ error = -EACCES;
65001+ goto out_dput;
65002+ }
65003+
65004 if (!IS_POSIXACL(dir->d_inode))
65005 mode &= ~current_umask();
65006 /*
65007@@ -2872,6 +2921,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
65008 nd->flags & LOOKUP_EXCL);
65009 if (error)
65010 goto out_dput;
65011+ else
65012+ gr_handle_create(dentry, nd->path.mnt);
65013 }
65014 out_no_open:
65015 path->dentry = dentry;
65016@@ -2886,7 +2937,7 @@ out_dput:
65017 /*
65018 * Handle the last step of open()
65019 */
65020-static int do_last(struct nameidata *nd, struct path *path,
65021+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
65022 struct file *file, const struct open_flags *op,
65023 int *opened, struct filename *name)
65024 {
65025@@ -2936,6 +2987,15 @@ static int do_last(struct nameidata *nd, struct path *path,
65026 if (error)
65027 return error;
65028
65029+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
65030+ error = -ENOENT;
65031+ goto out;
65032+ }
65033+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65034+ error = -EACCES;
65035+ goto out;
65036+ }
65037+
65038 audit_inode(name, dir, LOOKUP_PARENT);
65039 error = -EISDIR;
65040 /* trailing slashes? */
65041@@ -2955,7 +3015,7 @@ retry_lookup:
65042 */
65043 }
65044 mutex_lock(&dir->d_inode->i_mutex);
65045- error = lookup_open(nd, path, file, op, got_write, opened);
65046+ error = lookup_open(nd, path, link, file, op, got_write, opened);
65047 mutex_unlock(&dir->d_inode->i_mutex);
65048
65049 if (error <= 0) {
65050@@ -2979,11 +3039,28 @@ retry_lookup:
65051 goto finish_open_created;
65052 }
65053
65054+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
65055+ error = -ENOENT;
65056+ goto exit_dput;
65057+ }
65058+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
65059+ error = -EACCES;
65060+ goto exit_dput;
65061+ }
65062+
65063 /*
65064 * create/update audit record if it already exists.
65065 */
65066- if (d_is_positive(path->dentry))
65067+ if (d_is_positive(path->dentry)) {
65068+ /* only check if O_CREAT is specified, all other checks need to go
65069+ into may_open */
65070+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
65071+ error = -EACCES;
65072+ goto exit_dput;
65073+ }
65074+
65075 audit_inode(name, path->dentry, 0);
65076+ }
65077
65078 /*
65079 * If atomic_open() acquired write access it is dropped now due to
65080@@ -3024,6 +3101,11 @@ finish_lookup:
65081 }
65082 }
65083 BUG_ON(inode != path->dentry->d_inode);
65084+ /* if we're resolving a symlink to another symlink */
65085+ if (link && gr_handle_symlink_owner(link, inode)) {
65086+ error = -EACCES;
65087+ goto out;
65088+ }
65089 return 1;
65090 }
65091
65092@@ -3033,7 +3115,6 @@ finish_lookup:
65093 save_parent.dentry = nd->path.dentry;
65094 save_parent.mnt = mntget(path->mnt);
65095 nd->path.dentry = path->dentry;
65096-
65097 }
65098 nd->inode = inode;
65099 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
65100@@ -3043,7 +3124,18 @@ finish_open:
65101 path_put(&save_parent);
65102 return error;
65103 }
65104+
65105+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
65106+ error = -ENOENT;
65107+ goto out;
65108+ }
65109+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
65110+ error = -EACCES;
65111+ goto out;
65112+ }
65113+
65114 audit_inode(name, nd->path.dentry, 0);
65115+
65116 error = -EISDIR;
65117 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
65118 goto out;
65119@@ -3206,7 +3298,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65120 if (unlikely(error))
65121 goto out;
65122
65123- error = do_last(nd, &path, file, op, &opened, pathname);
65124+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
65125 while (unlikely(error > 0)) { /* trailing symlink */
65126 struct path link = path;
65127 void *cookie;
65128@@ -3224,7 +3316,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
65129 error = follow_link(&link, nd, &cookie);
65130 if (unlikely(error))
65131 break;
65132- error = do_last(nd, &path, file, op, &opened, pathname);
65133+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
65134 put_link(nd, &link, cookie);
65135 }
65136 out:
65137@@ -3324,9 +3416,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
65138 goto unlock;
65139
65140 error = -EEXIST;
65141- if (d_is_positive(dentry))
65142+ if (d_is_positive(dentry)) {
65143+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
65144+ error = -ENOENT;
65145 goto fail;
65146-
65147+ }
65148 /*
65149 * Special case - lookup gave negative, but... we had foo/bar/
65150 * From the vfs_mknod() POV we just have a negative dentry -
65151@@ -3378,6 +3472,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
65152 }
65153 EXPORT_SYMBOL(user_path_create);
65154
65155+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
65156+{
65157+ struct filename *tmp = getname(pathname);
65158+ struct dentry *res;
65159+ if (IS_ERR(tmp))
65160+ return ERR_CAST(tmp);
65161+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
65162+ if (IS_ERR(res))
65163+ putname(tmp);
65164+ else
65165+ *to = tmp;
65166+ return res;
65167+}
65168+
65169 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
65170 {
65171 int error = may_create(dir, dentry);
65172@@ -3441,6 +3549,17 @@ retry:
65173
65174 if (!IS_POSIXACL(path.dentry->d_inode))
65175 mode &= ~current_umask();
65176+
65177+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
65178+ error = -EPERM;
65179+ goto out;
65180+ }
65181+
65182+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
65183+ error = -EACCES;
65184+ goto out;
65185+ }
65186+
65187 error = security_path_mknod(&path, dentry, mode, dev);
65188 if (error)
65189 goto out;
65190@@ -3456,6 +3575,8 @@ retry:
65191 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
65192 break;
65193 }
65194+ if (!error)
65195+ gr_handle_create(dentry, path.mnt);
65196 out:
65197 done_path_create(&path, dentry);
65198 if (retry_estale(error, lookup_flags)) {
65199@@ -3510,9 +3631,16 @@ retry:
65200
65201 if (!IS_POSIXACL(path.dentry->d_inode))
65202 mode &= ~current_umask();
65203+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
65204+ error = -EACCES;
65205+ goto out;
65206+ }
65207 error = security_path_mkdir(&path, dentry, mode);
65208 if (!error)
65209 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
65210+ if (!error)
65211+ gr_handle_create(dentry, path.mnt);
65212+out:
65213 done_path_create(&path, dentry);
65214 if (retry_estale(error, lookup_flags)) {
65215 lookup_flags |= LOOKUP_REVAL;
65216@@ -3595,6 +3723,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
65217 struct filename *name;
65218 struct dentry *dentry;
65219 struct nameidata nd;
65220+ ino_t saved_ino = 0;
65221+ dev_t saved_dev = 0;
65222 unsigned int lookup_flags = 0;
65223 retry:
65224 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65225@@ -3627,10 +3757,21 @@ retry:
65226 error = -ENOENT;
65227 goto exit3;
65228 }
65229+
65230+ saved_ino = dentry->d_inode->i_ino;
65231+ saved_dev = gr_get_dev_from_dentry(dentry);
65232+
65233+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
65234+ error = -EACCES;
65235+ goto exit3;
65236+ }
65237+
65238 error = security_path_rmdir(&nd.path, dentry);
65239 if (error)
65240 goto exit3;
65241 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
65242+ if (!error && (saved_dev || saved_ino))
65243+ gr_handle_delete(saved_ino, saved_dev);
65244 exit3:
65245 dput(dentry);
65246 exit2:
65247@@ -3721,6 +3862,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
65248 struct nameidata nd;
65249 struct inode *inode = NULL;
65250 struct inode *delegated_inode = NULL;
65251+ ino_t saved_ino = 0;
65252+ dev_t saved_dev = 0;
65253 unsigned int lookup_flags = 0;
65254 retry:
65255 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
65256@@ -3747,10 +3890,22 @@ retry_deleg:
65257 if (d_is_negative(dentry))
65258 goto slashes;
65259 ihold(inode);
65260+
65261+ if (inode->i_nlink <= 1) {
65262+ saved_ino = inode->i_ino;
65263+ saved_dev = gr_get_dev_from_dentry(dentry);
65264+ }
65265+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
65266+ error = -EACCES;
65267+ goto exit2;
65268+ }
65269+
65270 error = security_path_unlink(&nd.path, dentry);
65271 if (error)
65272 goto exit2;
65273 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
65274+ if (!error && (saved_ino || saved_dev))
65275+ gr_handle_delete(saved_ino, saved_dev);
65276 exit2:
65277 dput(dentry);
65278 }
65279@@ -3839,9 +3994,17 @@ retry:
65280 if (IS_ERR(dentry))
65281 goto out_putname;
65282
65283+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
65284+ error = -EACCES;
65285+ goto out;
65286+ }
65287+
65288 error = security_path_symlink(&path, dentry, from->name);
65289 if (!error)
65290 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
65291+ if (!error)
65292+ gr_handle_create(dentry, path.mnt);
65293+out:
65294 done_path_create(&path, dentry);
65295 if (retry_estale(error, lookup_flags)) {
65296 lookup_flags |= LOOKUP_REVAL;
65297@@ -3945,6 +4108,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
65298 struct dentry *new_dentry;
65299 struct path old_path, new_path;
65300 struct inode *delegated_inode = NULL;
65301+ struct filename *to = NULL;
65302 int how = 0;
65303 int error;
65304
65305@@ -3968,7 +4132,7 @@ retry:
65306 if (error)
65307 return error;
65308
65309- new_dentry = user_path_create(newdfd, newname, &new_path,
65310+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
65311 (how & LOOKUP_REVAL));
65312 error = PTR_ERR(new_dentry);
65313 if (IS_ERR(new_dentry))
65314@@ -3980,11 +4144,28 @@ retry:
65315 error = may_linkat(&old_path);
65316 if (unlikely(error))
65317 goto out_dput;
65318+
65319+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
65320+ old_path.dentry->d_inode,
65321+ old_path.dentry->d_inode->i_mode, to)) {
65322+ error = -EACCES;
65323+ goto out_dput;
65324+ }
65325+
65326+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
65327+ old_path.dentry, old_path.mnt, to)) {
65328+ error = -EACCES;
65329+ goto out_dput;
65330+ }
65331+
65332 error = security_path_link(old_path.dentry, &new_path, new_dentry);
65333 if (error)
65334 goto out_dput;
65335 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
65336+ if (!error)
65337+ gr_handle_create(new_dentry, new_path.mnt);
65338 out_dput:
65339+ putname(to);
65340 done_path_create(&new_path, new_dentry);
65341 if (delegated_inode) {
65342 error = break_deleg_wait(&delegated_inode);
65343@@ -4295,6 +4476,12 @@ retry_deleg:
65344 if (new_dentry == trap)
65345 goto exit5;
65346
65347+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
65348+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
65349+ to, flags);
65350+ if (error)
65351+ goto exit5;
65352+
65353 error = security_path_rename(&oldnd.path, old_dentry,
65354 &newnd.path, new_dentry, flags);
65355 if (error)
65356@@ -4302,6 +4489,9 @@ retry_deleg:
65357 error = vfs_rename(old_dir->d_inode, old_dentry,
65358 new_dir->d_inode, new_dentry,
65359 &delegated_inode, flags);
65360+ if (!error)
65361+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
65362+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
65363 exit5:
65364 dput(new_dentry);
65365 exit4:
65366@@ -4344,14 +4534,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
65367
65368 int readlink_copy(char __user *buffer, int buflen, const char *link)
65369 {
65370+ char tmpbuf[64];
65371+ const char *newlink;
65372 int len = PTR_ERR(link);
65373+
65374 if (IS_ERR(link))
65375 goto out;
65376
65377 len = strlen(link);
65378 if (len > (unsigned) buflen)
65379 len = buflen;
65380- if (copy_to_user(buffer, link, len))
65381+
65382+ if (len < sizeof(tmpbuf)) {
65383+ memcpy(tmpbuf, link, len);
65384+ newlink = tmpbuf;
65385+ } else
65386+ newlink = link;
65387+
65388+ if (copy_to_user(buffer, newlink, len))
65389 len = -EFAULT;
65390 out:
65391 return len;
65392diff --git a/fs/namespace.c b/fs/namespace.c
65393index 7f67b46..c4ad324 100644
65394--- a/fs/namespace.c
65395+++ b/fs/namespace.c
65396@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
65397 if (!(sb->s_flags & MS_RDONLY))
65398 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
65399 up_write(&sb->s_umount);
65400+
65401+ gr_log_remount(mnt->mnt_devname, retval);
65402+
65403 return retval;
65404 }
65405
65406@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags)
65407 }
65408 unlock_mount_hash();
65409 namespace_unlock();
65410+
65411+ gr_log_unmount(mnt->mnt_devname, retval);
65412+
65413 return retval;
65414 }
65415
65416@@ -1403,7 +1409,7 @@ static inline bool may_mount(void)
65417 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
65418 */
65419
65420-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
65421+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
65422 {
65423 struct path path;
65424 struct mount *mnt;
65425@@ -1445,7 +1451,7 @@ out:
65426 /*
65427 * The 2.0 compatible umount. No flags.
65428 */
65429-SYSCALL_DEFINE1(oldumount, char __user *, name)
65430+SYSCALL_DEFINE1(oldumount, const char __user *, name)
65431 {
65432 return sys_umount(name, 0);
65433 }
65434@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name,
65435 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
65436 MS_STRICTATIME);
65437
65438+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
65439+ retval = -EPERM;
65440+ goto dput_out;
65441+ }
65442+
65443+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
65444+ retval = -EPERM;
65445+ goto dput_out;
65446+ }
65447+
65448 if (flags & MS_REMOUNT)
65449 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
65450 data_page);
65451@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name,
65452 dev_name, data_page);
65453 dput_out:
65454 path_put(&path);
65455+
65456+ gr_log_mount(dev_name, dir_name, retval);
65457+
65458 return retval;
65459 }
65460
65461@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
65462 * number incrementing at 10Ghz will take 12,427 years to wrap which
65463 * is effectively never, so we can ignore the possibility.
65464 */
65465-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
65466+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
65467
65468 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65469 {
65470@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65471 kfree(new_ns);
65472 return ERR_PTR(ret);
65473 }
65474- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
65475+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
65476 atomic_set(&new_ns->count, 1);
65477 new_ns->root = NULL;
65478 INIT_LIST_HEAD(&new_ns->list);
65479@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
65480 return new_ns;
65481 }
65482
65483-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65484+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
65485 struct user_namespace *user_ns, struct fs_struct *new_fs)
65486 {
65487 struct mnt_namespace *new_ns;
65488@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
65489 }
65490 EXPORT_SYMBOL(mount_subtree);
65491
65492-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
65493- char __user *, type, unsigned long, flags, void __user *, data)
65494+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
65495+ const char __user *, type, unsigned long, flags, void __user *, data)
65496 {
65497 int ret;
65498 char *kernel_type;
65499@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65500 if (error)
65501 goto out2;
65502
65503+ if (gr_handle_chroot_pivot()) {
65504+ error = -EPERM;
65505+ goto out2;
65506+ }
65507+
65508 get_fs_root(current->fs, &root);
65509 old_mp = lock_mount(&old);
65510 error = PTR_ERR(old_mp);
65511@@ -2822,6 +2846,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
65512 /* make sure we can reach put_old from new_root */
65513 if (!is_path_reachable(old_mnt, old.dentry, &new))
65514 goto out4;
65515+ /* make certain new is below the root */
65516+ if (!is_path_reachable(new_mnt, new.dentry, &root))
65517+ goto out4;
65518 root_mp->m_count++; /* pin it so it won't go away */
65519 lock_mount_hash();
65520 detach_mnt(new_mnt, &parent_path);
65521@@ -3053,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
65522 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
65523 return -EPERM;
65524
65525- if (fs->users != 1)
65526+ if (atomic_read(&fs->users) != 1)
65527 return -EINVAL;
65528
65529 get_mnt_ns(mnt_ns);
65530diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
65531index f4ccfe6..a5cf064 100644
65532--- a/fs/nfs/callback_xdr.c
65533+++ b/fs/nfs/callback_xdr.c
65534@@ -51,7 +51,7 @@ struct callback_op {
65535 callback_decode_arg_t decode_args;
65536 callback_encode_res_t encode_res;
65537 long res_maxsize;
65538-};
65539+} __do_const;
65540
65541 static struct callback_op callback_ops[];
65542
65543diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
65544index 577a36f..1cde799 100644
65545--- a/fs/nfs/inode.c
65546+++ b/fs/nfs/inode.c
65547@@ -1228,16 +1228,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
65548 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
65549 }
65550
65551-static atomic_long_t nfs_attr_generation_counter;
65552+static atomic_long_unchecked_t nfs_attr_generation_counter;
65553
65554 static unsigned long nfs_read_attr_generation_counter(void)
65555 {
65556- return atomic_long_read(&nfs_attr_generation_counter);
65557+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
65558 }
65559
65560 unsigned long nfs_inc_attr_generation_counter(void)
65561 {
65562- return atomic_long_inc_return(&nfs_attr_generation_counter);
65563+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
65564 }
65565
65566 void nfs_fattr_init(struct nfs_fattr *fattr)
65567diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
65568index 5e0dc52..64681bc 100644
65569--- a/fs/nfsd/nfs4proc.c
65570+++ b/fs/nfsd/nfs4proc.c
65571@@ -1155,7 +1155,7 @@ struct nfsd4_operation {
65572 nfsd4op_rsize op_rsize_bop;
65573 stateid_getter op_get_currentstateid;
65574 stateid_setter op_set_currentstateid;
65575-};
65576+} __do_const;
65577
65578 static struct nfsd4_operation nfsd4_ops[];
65579
65580diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
65581index 353aac8..32035ee 100644
65582--- a/fs/nfsd/nfs4xdr.c
65583+++ b/fs/nfsd/nfs4xdr.c
65584@@ -1534,7 +1534,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
65585
65586 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
65587
65588-static nfsd4_dec nfsd4_dec_ops[] = {
65589+static const nfsd4_dec nfsd4_dec_ops[] = {
65590 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
65591 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
65592 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
65593diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
65594index ff95676..96cf3f62 100644
65595--- a/fs/nfsd/nfscache.c
65596+++ b/fs/nfsd/nfscache.c
65597@@ -527,17 +527,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65598 {
65599 struct svc_cacherep *rp = rqstp->rq_cacherep;
65600 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
65601- int len;
65602+ long len;
65603 size_t bufsize = 0;
65604
65605 if (!rp)
65606 return;
65607
65608- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
65609- len >>= 2;
65610+ if (statp) {
65611+ len = (char*)statp - (char*)resv->iov_base;
65612+ len = resv->iov_len - len;
65613+ len >>= 2;
65614+ }
65615
65616 /* Don't cache excessive amounts of data and XDR failures */
65617- if (!statp || len > (256 >> 2)) {
65618+ if (!statp || len > (256 >> 2) || len < 0) {
65619 nfsd_reply_cache_free(rp);
65620 return;
65621 }
65622@@ -545,7 +548,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
65623 switch (cachetype) {
65624 case RC_REPLSTAT:
65625 if (len != 1)
65626- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
65627+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
65628 rp->c_replstat = *statp;
65629 break;
65630 case RC_REPLBUFF:
65631diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
65632index 6ab077b..5ac7f0b 100644
65633--- a/fs/nfsd/vfs.c
65634+++ b/fs/nfsd/vfs.c
65635@@ -855,7 +855,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
65636
65637 oldfs = get_fs();
65638 set_fs(KERNEL_DS);
65639- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
65640+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
65641 set_fs(oldfs);
65642 return nfsd_finish_read(file, count, host_err);
65643 }
65644@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
65645
65646 /* Write the data. */
65647 oldfs = get_fs(); set_fs(KERNEL_DS);
65648- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
65649+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
65650 set_fs(oldfs);
65651 if (host_err < 0)
65652 goto out_nfserr;
65653@@ -1485,7 +1485,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
65654 */
65655
65656 oldfs = get_fs(); set_fs(KERNEL_DS);
65657- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
65658+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
65659 set_fs(oldfs);
65660
65661 if (host_err < 0)
65662diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
65663index 52ccd34..7a6b202 100644
65664--- a/fs/nls/nls_base.c
65665+++ b/fs/nls/nls_base.c
65666@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
65667
65668 int __register_nls(struct nls_table *nls, struct module *owner)
65669 {
65670- struct nls_table ** tmp = &tables;
65671+ struct nls_table *tmp = tables;
65672
65673 if (nls->next)
65674 return -EBUSY;
65675
65676- nls->owner = owner;
65677+ pax_open_kernel();
65678+ *(void **)&nls->owner = owner;
65679+ pax_close_kernel();
65680 spin_lock(&nls_lock);
65681- while (*tmp) {
65682- if (nls == *tmp) {
65683+ while (tmp) {
65684+ if (nls == tmp) {
65685 spin_unlock(&nls_lock);
65686 return -EBUSY;
65687 }
65688- tmp = &(*tmp)->next;
65689+ tmp = tmp->next;
65690 }
65691- nls->next = tables;
65692+ pax_open_kernel();
65693+ *(struct nls_table **)&nls->next = tables;
65694+ pax_close_kernel();
65695 tables = nls;
65696 spin_unlock(&nls_lock);
65697 return 0;
65698@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65699
65700 int unregister_nls(struct nls_table * nls)
65701 {
65702- struct nls_table ** tmp = &tables;
65703+ struct nls_table * const * tmp = &tables;
65704
65705 spin_lock(&nls_lock);
65706 while (*tmp) {
65707 if (nls == *tmp) {
65708- *tmp = nls->next;
65709+ pax_open_kernel();
65710+ *(struct nls_table **)tmp = nls->next;
65711+ pax_close_kernel();
65712 spin_unlock(&nls_lock);
65713 return 0;
65714 }
65715@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65716 return -EINVAL;
65717 }
65718
65719-static struct nls_table *find_nls(char *charset)
65720+static struct nls_table *find_nls(const char *charset)
65721 {
65722 struct nls_table *nls;
65723 spin_lock(&nls_lock);
65724@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65725 return nls;
65726 }
65727
65728-struct nls_table *load_nls(char *charset)
65729+struct nls_table *load_nls(const char *charset)
65730 {
65731 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65732 }
65733diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65734index 162b3f1..6076a7c 100644
65735--- a/fs/nls/nls_euc-jp.c
65736+++ b/fs/nls/nls_euc-jp.c
65737@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65738 p_nls = load_nls("cp932");
65739
65740 if (p_nls) {
65741- table.charset2upper = p_nls->charset2upper;
65742- table.charset2lower = p_nls->charset2lower;
65743+ pax_open_kernel();
65744+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65745+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65746+ pax_close_kernel();
65747 return register_nls(&table);
65748 }
65749
65750diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65751index a80a741..7b96e1b 100644
65752--- a/fs/nls/nls_koi8-ru.c
65753+++ b/fs/nls/nls_koi8-ru.c
65754@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65755 p_nls = load_nls("koi8-u");
65756
65757 if (p_nls) {
65758- table.charset2upper = p_nls->charset2upper;
65759- table.charset2lower = p_nls->charset2lower;
65760+ pax_open_kernel();
65761+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65762+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65763+ pax_close_kernel();
65764 return register_nls(&table);
65765 }
65766
65767diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65768index c991616..5ae51af 100644
65769--- a/fs/notify/fanotify/fanotify_user.c
65770+++ b/fs/notify/fanotify/fanotify_user.c
65771@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65772
65773 fd = fanotify_event_metadata.fd;
65774 ret = -EFAULT;
65775- if (copy_to_user(buf, &fanotify_event_metadata,
65776- fanotify_event_metadata.event_len))
65777+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65778+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65779 goto out_close_fd;
65780
65781 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
65782diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
65783index 0f88bc0..7d888d7 100644
65784--- a/fs/notify/inotify/inotify_fsnotify.c
65785+++ b/fs/notify/inotify/inotify_fsnotify.c
65786@@ -165,8 +165,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
65787 /* ideally the idr is empty and we won't hit the BUG in the callback */
65788 idr_for_each(&group->inotify_data.idr, idr_callback, group);
65789 idr_destroy(&group->inotify_data.idr);
65790- atomic_dec(&group->inotify_data.user->inotify_devs);
65791- free_uid(group->inotify_data.user);
65792+ if (group->inotify_data.user) {
65793+ atomic_dec(&group->inotify_data.user->inotify_devs);
65794+ free_uid(group->inotify_data.user);
65795+ }
65796 }
65797
65798 static void inotify_free_event(struct fsnotify_event *fsn_event)
65799diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65800index a95d8e0..a91a5fd 100644
65801--- a/fs/notify/notification.c
65802+++ b/fs/notify/notification.c
65803@@ -48,7 +48,7 @@
65804 #include <linux/fsnotify_backend.h>
65805 #include "fsnotify.h"
65806
65807-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65808+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65809
65810 /**
65811 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65812@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65813 */
65814 u32 fsnotify_get_cookie(void)
65815 {
65816- return atomic_inc_return(&fsnotify_sync_cookie);
65817+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65818 }
65819 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65820
65821diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65822index 9e38daf..5727cae 100644
65823--- a/fs/ntfs/dir.c
65824+++ b/fs/ntfs/dir.c
65825@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65826 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65827 ~(s64)(ndir->itype.index.block_size - 1)));
65828 /* Bounds checks. */
65829- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65830+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65831 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65832 "inode 0x%lx or driver bug.", vdir->i_ino);
65833 goto err_out;
65834diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65835index f5ec1ce..807fd78 100644
65836--- a/fs/ntfs/file.c
65837+++ b/fs/ntfs/file.c
65838@@ -1279,7 +1279,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65839 char *addr;
65840 size_t total = 0;
65841 unsigned len;
65842- int left;
65843+ unsigned left;
65844
65845 do {
65846 len = PAGE_CACHE_SIZE - ofs;
65847diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65848index 6c3296e..c0b99f0 100644
65849--- a/fs/ntfs/super.c
65850+++ b/fs/ntfs/super.c
65851@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65852 if (!silent)
65853 ntfs_error(sb, "Primary boot sector is invalid.");
65854 } else if (!silent)
65855- ntfs_error(sb, read_err_str, "primary");
65856+ ntfs_error(sb, read_err_str, "%s", "primary");
65857 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65858 if (bh_primary)
65859 brelse(bh_primary);
65860@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65861 goto hotfix_primary_boot_sector;
65862 brelse(bh_backup);
65863 } else if (!silent)
65864- ntfs_error(sb, read_err_str, "backup");
65865+ ntfs_error(sb, read_err_str, "%s", "backup");
65866 /* Try to read NT3.51- backup boot sector. */
65867 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65868 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65869@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65870 "sector.");
65871 brelse(bh_backup);
65872 } else if (!silent)
65873- ntfs_error(sb, read_err_str, "backup");
65874+ ntfs_error(sb, read_err_str, "%s", "backup");
65875 /* We failed. Cleanup and return. */
65876 if (bh_primary)
65877 brelse(bh_primary);
65878diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65879index 0440134..d52c93a 100644
65880--- a/fs/ocfs2/localalloc.c
65881+++ b/fs/ocfs2/localalloc.c
65882@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65883 goto bail;
65884 }
65885
65886- atomic_inc(&osb->alloc_stats.moves);
65887+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65888
65889 bail:
65890 if (handle)
65891diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
65892index 8add6f1..b931e04 100644
65893--- a/fs/ocfs2/namei.c
65894+++ b/fs/ocfs2/namei.c
65895@@ -158,7 +158,7 @@ bail_add:
65896 * NOTE: This dentry already has ->d_op set from
65897 * ocfs2_get_parent() and ocfs2_get_dentry()
65898 */
65899- if (ret)
65900+ if (!IS_ERR_OR_NULL(ret))
65901 dentry = ret;
65902
65903 status = ocfs2_dentry_attach_lock(dentry, inode,
65904diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65905index bbec539..7b266d5 100644
65906--- a/fs/ocfs2/ocfs2.h
65907+++ b/fs/ocfs2/ocfs2.h
65908@@ -236,11 +236,11 @@ enum ocfs2_vol_state
65909
65910 struct ocfs2_alloc_stats
65911 {
65912- atomic_t moves;
65913- atomic_t local_data;
65914- atomic_t bitmap_data;
65915- atomic_t bg_allocs;
65916- atomic_t bg_extends;
65917+ atomic_unchecked_t moves;
65918+ atomic_unchecked_t local_data;
65919+ atomic_unchecked_t bitmap_data;
65920+ atomic_unchecked_t bg_allocs;
65921+ atomic_unchecked_t bg_extends;
65922 };
65923
65924 enum ocfs2_local_alloc_state
65925diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65926index 0cb889a..6a26b24 100644
65927--- a/fs/ocfs2/suballoc.c
65928+++ b/fs/ocfs2/suballoc.c
65929@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65930 mlog_errno(status);
65931 goto bail;
65932 }
65933- atomic_inc(&osb->alloc_stats.bg_extends);
65934+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65935
65936 /* You should never ask for this much metadata */
65937 BUG_ON(bits_wanted >
65938@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65939 mlog_errno(status);
65940 goto bail;
65941 }
65942- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65943+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65944
65945 *suballoc_loc = res.sr_bg_blkno;
65946 *suballoc_bit_start = res.sr_bit_offset;
65947@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65948 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65949 res->sr_bits);
65950
65951- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65952+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65953
65954 BUG_ON(res->sr_bits != 1);
65955
65956@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65957 mlog_errno(status);
65958 goto bail;
65959 }
65960- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65961+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65962
65963 BUG_ON(res.sr_bits != 1);
65964
65965@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65966 cluster_start,
65967 num_clusters);
65968 if (!status)
65969- atomic_inc(&osb->alloc_stats.local_data);
65970+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65971 } else {
65972 if (min_clusters > (osb->bitmap_cpg - 1)) {
65973 /* The only paths asking for contiguousness
65974@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65975 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65976 res.sr_bg_blkno,
65977 res.sr_bit_offset);
65978- atomic_inc(&osb->alloc_stats.bitmap_data);
65979+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65980 *num_clusters = res.sr_bits;
65981 }
65982 }
65983diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65984index 4142546..69375a9 100644
65985--- a/fs/ocfs2/super.c
65986+++ b/fs/ocfs2/super.c
65987@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65988 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65989 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65990 "Stats",
65991- atomic_read(&osb->alloc_stats.bitmap_data),
65992- atomic_read(&osb->alloc_stats.local_data),
65993- atomic_read(&osb->alloc_stats.bg_allocs),
65994- atomic_read(&osb->alloc_stats.moves),
65995- atomic_read(&osb->alloc_stats.bg_extends));
65996+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65997+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65998+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65999+ atomic_read_unchecked(&osb->alloc_stats.moves),
66000+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
66001
66002 out += snprintf(buf + out, len - out,
66003 "%10s => State: %u Descriptor: %llu Size: %u bits "
66004@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
66005
66006 mutex_init(&osb->system_file_mutex);
66007
66008- atomic_set(&osb->alloc_stats.moves, 0);
66009- atomic_set(&osb->alloc_stats.local_data, 0);
66010- atomic_set(&osb->alloc_stats.bitmap_data, 0);
66011- atomic_set(&osb->alloc_stats.bg_allocs, 0);
66012- atomic_set(&osb->alloc_stats.bg_extends, 0);
66013+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
66014+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
66015+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
66016+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
66017+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
66018
66019 /* Copy the blockcheck stats from the superblock probe */
66020 osb->osb_ecc_stats = *stats;
66021diff --git a/fs/open.c b/fs/open.c
66022index d6fd3ac..6ccf474 100644
66023--- a/fs/open.c
66024+++ b/fs/open.c
66025@@ -32,6 +32,8 @@
66026 #include <linux/dnotify.h>
66027 #include <linux/compat.h>
66028
66029+#define CREATE_TRACE_POINTS
66030+#include <trace/events/fs.h>
66031 #include "internal.h"
66032
66033 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
66034@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
66035 error = locks_verify_truncate(inode, NULL, length);
66036 if (!error)
66037 error = security_path_truncate(path);
66038+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
66039+ error = -EACCES;
66040 if (!error)
66041 error = do_truncate(path->dentry, length, 0, NULL);
66042
66043@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
66044 error = locks_verify_truncate(inode, f.file, length);
66045 if (!error)
66046 error = security_path_truncate(&f.file->f_path);
66047+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
66048+ error = -EACCES;
66049 if (!error)
66050 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
66051 sb_end_write(inode->i_sb);
66052@@ -380,6 +386,9 @@ retry:
66053 if (__mnt_is_readonly(path.mnt))
66054 res = -EROFS;
66055
66056+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
66057+ res = -EACCES;
66058+
66059 out_path_release:
66060 path_put(&path);
66061 if (retry_estale(res, lookup_flags)) {
66062@@ -411,6 +420,8 @@ retry:
66063 if (error)
66064 goto dput_and_out;
66065
66066+ gr_log_chdir(path.dentry, path.mnt);
66067+
66068 set_fs_pwd(current->fs, &path);
66069
66070 dput_and_out:
66071@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
66072 goto out_putf;
66073
66074 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
66075+
66076+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
66077+ error = -EPERM;
66078+
66079+ if (!error)
66080+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
66081+
66082 if (!error)
66083 set_fs_pwd(current->fs, &f.file->f_path);
66084 out_putf:
66085@@ -469,7 +487,13 @@ retry:
66086 if (error)
66087 goto dput_and_out;
66088
66089+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
66090+ goto dput_and_out;
66091+
66092 set_fs_root(current->fs, &path);
66093+
66094+ gr_handle_chroot_chdir(&path);
66095+
66096 error = 0;
66097 dput_and_out:
66098 path_put(&path);
66099@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
66100 return error;
66101 retry_deleg:
66102 mutex_lock(&inode->i_mutex);
66103+
66104+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
66105+ error = -EACCES;
66106+ goto out_unlock;
66107+ }
66108+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
66109+ error = -EACCES;
66110+ goto out_unlock;
66111+ }
66112+
66113 error = security_path_chmod(path, mode);
66114 if (error)
66115 goto out_unlock;
66116@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
66117 uid = make_kuid(current_user_ns(), user);
66118 gid = make_kgid(current_user_ns(), group);
66119
66120+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
66121+ return -EACCES;
66122+
66123 newattrs.ia_valid = ATTR_CTIME;
66124 if (user != (uid_t) -1) {
66125 if (!uid_valid(uid))
66126@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
66127 } else {
66128 fsnotify_open(f);
66129 fd_install(fd, f);
66130+ trace_do_sys_open(tmp->name, flags, mode);
66131 }
66132 }
66133 putname(tmp);
66134diff --git a/fs/pipe.c b/fs/pipe.c
66135index 21981e5..3d5f55c 100644
66136--- a/fs/pipe.c
66137+++ b/fs/pipe.c
66138@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
66139
66140 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
66141 {
66142- if (pipe->files)
66143+ if (atomic_read(&pipe->files))
66144 mutex_lock_nested(&pipe->mutex, subclass);
66145 }
66146
66147@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
66148
66149 void pipe_unlock(struct pipe_inode_info *pipe)
66150 {
66151- if (pipe->files)
66152+ if (atomic_read(&pipe->files))
66153 mutex_unlock(&pipe->mutex);
66154 }
66155 EXPORT_SYMBOL(pipe_unlock);
66156@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
66157 }
66158 if (bufs) /* More to do? */
66159 continue;
66160- if (!pipe->writers)
66161+ if (!atomic_read(&pipe->writers))
66162 break;
66163- if (!pipe->waiting_writers) {
66164+ if (!atomic_read(&pipe->waiting_writers)) {
66165 /* syscall merging: Usually we must not sleep
66166 * if O_NONBLOCK is set, or if we got some data.
66167 * But if a writer sleeps in kernel space, then
66168@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66169
66170 __pipe_lock(pipe);
66171
66172- if (!pipe->readers) {
66173+ if (!atomic_read(&pipe->readers)) {
66174 send_sig(SIGPIPE, current, 0);
66175 ret = -EPIPE;
66176 goto out;
66177@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66178 for (;;) {
66179 int bufs;
66180
66181- if (!pipe->readers) {
66182+ if (!atomic_read(&pipe->readers)) {
66183 send_sig(SIGPIPE, current, 0);
66184 if (!ret)
66185 ret = -EPIPE;
66186@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
66187 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66188 do_wakeup = 0;
66189 }
66190- pipe->waiting_writers++;
66191+ atomic_inc(&pipe->waiting_writers);
66192 pipe_wait(pipe);
66193- pipe->waiting_writers--;
66194+ atomic_dec(&pipe->waiting_writers);
66195 }
66196 out:
66197 __pipe_unlock(pipe);
66198@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66199 mask = 0;
66200 if (filp->f_mode & FMODE_READ) {
66201 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
66202- if (!pipe->writers && filp->f_version != pipe->w_counter)
66203+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
66204 mask |= POLLHUP;
66205 }
66206
66207@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
66208 * Most Unices do not set POLLERR for FIFOs but on Linux they
66209 * behave exactly like pipes for poll().
66210 */
66211- if (!pipe->readers)
66212+ if (!atomic_read(&pipe->readers))
66213 mask |= POLLERR;
66214 }
66215
66216@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
66217 int kill = 0;
66218
66219 spin_lock(&inode->i_lock);
66220- if (!--pipe->files) {
66221+ if (atomic_dec_and_test(&pipe->files)) {
66222 inode->i_pipe = NULL;
66223 kill = 1;
66224 }
66225@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
66226
66227 __pipe_lock(pipe);
66228 if (file->f_mode & FMODE_READ)
66229- pipe->readers--;
66230+ atomic_dec(&pipe->readers);
66231 if (file->f_mode & FMODE_WRITE)
66232- pipe->writers--;
66233+ atomic_dec(&pipe->writers);
66234
66235- if (pipe->readers || pipe->writers) {
66236+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
66237 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
66238 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
66239 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
66240@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
66241 kfree(pipe);
66242 }
66243
66244-static struct vfsmount *pipe_mnt __read_mostly;
66245+struct vfsmount *pipe_mnt __read_mostly;
66246
66247 /*
66248 * pipefs_dname() is called from d_path().
66249@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
66250 goto fail_iput;
66251
66252 inode->i_pipe = pipe;
66253- pipe->files = 2;
66254- pipe->readers = pipe->writers = 1;
66255+ atomic_set(&pipe->files, 2);
66256+ atomic_set(&pipe->readers, 1);
66257+ atomic_set(&pipe->writers, 1);
66258 inode->i_fop = &pipefifo_fops;
66259
66260 /*
66261@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
66262 spin_lock(&inode->i_lock);
66263 if (inode->i_pipe) {
66264 pipe = inode->i_pipe;
66265- pipe->files++;
66266+ atomic_inc(&pipe->files);
66267 spin_unlock(&inode->i_lock);
66268 } else {
66269 spin_unlock(&inode->i_lock);
66270 pipe = alloc_pipe_info();
66271 if (!pipe)
66272 return -ENOMEM;
66273- pipe->files = 1;
66274+ atomic_set(&pipe->files, 1);
66275 spin_lock(&inode->i_lock);
66276 if (unlikely(inode->i_pipe)) {
66277- inode->i_pipe->files++;
66278+ atomic_inc(&inode->i_pipe->files);
66279 spin_unlock(&inode->i_lock);
66280 free_pipe_info(pipe);
66281 pipe = inode->i_pipe;
66282@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
66283 * opened, even when there is no process writing the FIFO.
66284 */
66285 pipe->r_counter++;
66286- if (pipe->readers++ == 0)
66287+ if (atomic_inc_return(&pipe->readers) == 1)
66288 wake_up_partner(pipe);
66289
66290- if (!is_pipe && !pipe->writers) {
66291+ if (!is_pipe && !atomic_read(&pipe->writers)) {
66292 if ((filp->f_flags & O_NONBLOCK)) {
66293 /* suppress POLLHUP until we have
66294 * seen a writer */
66295@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
66296 * errno=ENXIO when there is no process reading the FIFO.
66297 */
66298 ret = -ENXIO;
66299- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
66300+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
66301 goto err;
66302
66303 pipe->w_counter++;
66304- if (!pipe->writers++)
66305+ if (atomic_inc_return(&pipe->writers) == 1)
66306 wake_up_partner(pipe);
66307
66308- if (!is_pipe && !pipe->readers) {
66309+ if (!is_pipe && !atomic_read(&pipe->readers)) {
66310 if (wait_for_partner(pipe, &pipe->r_counter))
66311 goto err_wr;
66312 }
66313@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
66314 * the process can at least talk to itself.
66315 */
66316
66317- pipe->readers++;
66318- pipe->writers++;
66319+ atomic_inc(&pipe->readers);
66320+ atomic_inc(&pipe->writers);
66321 pipe->r_counter++;
66322 pipe->w_counter++;
66323- if (pipe->readers == 1 || pipe->writers == 1)
66324+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
66325 wake_up_partner(pipe);
66326 break;
66327
66328@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
66329 return 0;
66330
66331 err_rd:
66332- if (!--pipe->readers)
66333+ if (atomic_dec_and_test(&pipe->readers))
66334 wake_up_interruptible(&pipe->wait);
66335 ret = -ERESTARTSYS;
66336 goto err;
66337
66338 err_wr:
66339- if (!--pipe->writers)
66340+ if (atomic_dec_and_test(&pipe->writers))
66341 wake_up_interruptible(&pipe->wait);
66342 ret = -ERESTARTSYS;
66343 goto err;
66344diff --git a/fs/posix_acl.c b/fs/posix_acl.c
66345index 0855f77..6787d50 100644
66346--- a/fs/posix_acl.c
66347+++ b/fs/posix_acl.c
66348@@ -20,6 +20,7 @@
66349 #include <linux/xattr.h>
66350 #include <linux/export.h>
66351 #include <linux/user_namespace.h>
66352+#include <linux/grsecurity.h>
66353
66354 struct posix_acl **acl_by_type(struct inode *inode, int type)
66355 {
66356@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
66357 }
66358 }
66359 if (mode_p)
66360- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66361+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66362 return not_equiv;
66363 }
66364 EXPORT_SYMBOL(posix_acl_equiv_mode);
66365@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
66366 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
66367 }
66368
66369- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
66370+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
66371 return not_equiv;
66372 }
66373
66374@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
66375 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
66376 int err = -ENOMEM;
66377 if (clone) {
66378+ *mode_p &= ~gr_acl_umask();
66379+
66380 err = posix_acl_create_masq(clone, mode_p);
66381 if (err < 0) {
66382 posix_acl_release(clone);
66383@@ -659,11 +662,12 @@ struct posix_acl *
66384 posix_acl_from_xattr(struct user_namespace *user_ns,
66385 const void *value, size_t size)
66386 {
66387- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
66388- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
66389+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
66390+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
66391 int count;
66392 struct posix_acl *acl;
66393 struct posix_acl_entry *acl_e;
66394+ umode_t umask = gr_acl_umask();
66395
66396 if (!value)
66397 return NULL;
66398@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66399
66400 switch(acl_e->e_tag) {
66401 case ACL_USER_OBJ:
66402+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66403+ break;
66404 case ACL_GROUP_OBJ:
66405 case ACL_MASK:
66406+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66407+ break;
66408 case ACL_OTHER:
66409+ acl_e->e_perm &= ~(umask & S_IRWXO);
66410 break;
66411
66412 case ACL_USER:
66413+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
66414 acl_e->e_uid =
66415 make_kuid(user_ns,
66416 le32_to_cpu(entry->e_id));
66417@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
66418 goto fail;
66419 break;
66420 case ACL_GROUP:
66421+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
66422 acl_e->e_gid =
66423 make_kgid(user_ns,
66424 le32_to_cpu(entry->e_id));
66425diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
66426index 2183fcf..3c32a98 100644
66427--- a/fs/proc/Kconfig
66428+++ b/fs/proc/Kconfig
66429@@ -30,7 +30,7 @@ config PROC_FS
66430
66431 config PROC_KCORE
66432 bool "/proc/kcore support" if !ARM
66433- depends on PROC_FS && MMU
66434+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
66435 help
66436 Provides a virtual ELF core file of the live kernel. This can
66437 be read with gdb and other ELF tools. No modifications can be
66438@@ -38,8 +38,8 @@ config PROC_KCORE
66439
66440 config PROC_VMCORE
66441 bool "/proc/vmcore support"
66442- depends on PROC_FS && CRASH_DUMP
66443- default y
66444+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
66445+ default n
66446 help
66447 Exports the dump image of crashed kernel in ELF format.
66448
66449@@ -63,8 +63,8 @@ config PROC_SYSCTL
66450 limited in memory.
66451
66452 config PROC_PAGE_MONITOR
66453- default y
66454- depends on PROC_FS && MMU
66455+ default n
66456+ depends on PROC_FS && MMU && !GRKERNSEC
66457 bool "Enable /proc page monitoring" if EXPERT
66458 help
66459 Various /proc files exist to monitor process memory utilization:
66460diff --git a/fs/proc/array.c b/fs/proc/array.c
66461index cd3653e..9b9b79a 100644
66462--- a/fs/proc/array.c
66463+++ b/fs/proc/array.c
66464@@ -60,6 +60,7 @@
66465 #include <linux/tty.h>
66466 #include <linux/string.h>
66467 #include <linux/mman.h>
66468+#include <linux/grsecurity.h>
66469 #include <linux/proc_fs.h>
66470 #include <linux/ioport.h>
66471 #include <linux/uaccess.h>
66472@@ -347,6 +348,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
66473 seq_putc(m, '\n');
66474 }
66475
66476+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66477+static inline void task_pax(struct seq_file *m, struct task_struct *p)
66478+{
66479+ if (p->mm)
66480+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
66481+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
66482+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
66483+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
66484+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
66485+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
66486+ else
66487+ seq_printf(m, "PaX:\t-----\n");
66488+}
66489+#endif
66490+
66491 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66492 struct pid *pid, struct task_struct *task)
66493 {
66494@@ -365,9 +381,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
66495 task_cpus_allowed(m, task);
66496 cpuset_task_status_allowed(m, task);
66497 task_context_switch_counts(m, task);
66498+
66499+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66500+ task_pax(m, task);
66501+#endif
66502+
66503+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
66504+ task_grsec_rbac(m, task);
66505+#endif
66506+
66507 return 0;
66508 }
66509
66510+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66511+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66512+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66513+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66514+#endif
66515+
66516 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66517 struct pid *pid, struct task_struct *task, int whole)
66518 {
66519@@ -389,6 +420,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66520 char tcomm[sizeof(task->comm)];
66521 unsigned long flags;
66522
66523+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66524+ if (current->exec_id != m->exec_id) {
66525+ gr_log_badprocpid("stat");
66526+ return 0;
66527+ }
66528+#endif
66529+
66530 state = *get_task_state(task);
66531 vsize = eip = esp = 0;
66532 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66533@@ -459,6 +497,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66534 gtime = task_gtime(task);
66535 }
66536
66537+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66538+ if (PAX_RAND_FLAGS(mm)) {
66539+ eip = 0;
66540+ esp = 0;
66541+ wchan = 0;
66542+ }
66543+#endif
66544+#ifdef CONFIG_GRKERNSEC_HIDESYM
66545+ wchan = 0;
66546+ eip =0;
66547+ esp =0;
66548+#endif
66549+
66550 /* scale priority and nice values from timeslices to -20..20 */
66551 /* to make it look like a "normal" Unix priority/nice value */
66552 priority = task_prio(task);
66553@@ -490,9 +541,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66554 seq_put_decimal_ull(m, ' ', vsize);
66555 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
66556 seq_put_decimal_ull(m, ' ', rsslim);
66557+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66558+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
66559+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
66560+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
66561+#else
66562 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
66563 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
66564 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
66565+#endif
66566 seq_put_decimal_ull(m, ' ', esp);
66567 seq_put_decimal_ull(m, ' ', eip);
66568 /* The signal information here is obsolete.
66569@@ -514,7 +571,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
66570 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
66571 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
66572
66573- if (mm && permitted) {
66574+ if (mm && permitted
66575+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66576+ && !PAX_RAND_FLAGS(mm)
66577+#endif
66578+ ) {
66579 seq_put_decimal_ull(m, ' ', mm->start_data);
66580 seq_put_decimal_ull(m, ' ', mm->end_data);
66581 seq_put_decimal_ull(m, ' ', mm->start_brk);
66582@@ -552,8 +613,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66583 struct pid *pid, struct task_struct *task)
66584 {
66585 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
66586- struct mm_struct *mm = get_task_mm(task);
66587+ struct mm_struct *mm;
66588
66589+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66590+ if (current->exec_id != m->exec_id) {
66591+ gr_log_badprocpid("statm");
66592+ return 0;
66593+ }
66594+#endif
66595+ mm = get_task_mm(task);
66596 if (mm) {
66597 size = task_statm(mm, &shared, &text, &data, &resident);
66598 mmput(mm);
66599@@ -576,6 +644,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
66600 return 0;
66601 }
66602
66603+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66604+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
66605+{
66606+ unsigned long flags;
66607+ u32 curr_ip = 0;
66608+
66609+ if (lock_task_sighand(task, &flags)) {
66610+ curr_ip = task->signal->curr_ip;
66611+ unlock_task_sighand(task, &flags);
66612+ }
66613+ return seq_printf(m, "%pI4\n", &curr_ip);
66614+}
66615+#endif
66616+
66617 #ifdef CONFIG_CHECKPOINT_RESTORE
66618 static struct pid *
66619 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
66620diff --git a/fs/proc/base.c b/fs/proc/base.c
66621index baf852b..03fe930 100644
66622--- a/fs/proc/base.c
66623+++ b/fs/proc/base.c
66624@@ -113,6 +113,14 @@ struct pid_entry {
66625 union proc_op op;
66626 };
66627
66628+struct getdents_callback {
66629+ struct linux_dirent __user * current_dir;
66630+ struct linux_dirent __user * previous;
66631+ struct file * file;
66632+ int count;
66633+ int error;
66634+};
66635+
66636 #define NOD(NAME, MODE, IOP, FOP, OP) { \
66637 .name = (NAME), \
66638 .len = sizeof(NAME) - 1, \
66639@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
66640 return 0;
66641 }
66642
66643+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66644+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66645+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66646+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66647+#endif
66648+
66649 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66650 struct pid *pid, struct task_struct *task)
66651 {
66652 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66653 if (mm && !IS_ERR(mm)) {
66654 unsigned int nwords = 0;
66655+
66656+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66657+ /* allow if we're currently ptracing this task */
66658+ if (PAX_RAND_FLAGS(mm) &&
66659+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
66660+ mmput(mm);
66661+ return 0;
66662+ }
66663+#endif
66664+
66665 do {
66666 nwords += 2;
66667 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
66668@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
66669 }
66670
66671
66672-#ifdef CONFIG_KALLSYMS
66673+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66674 /*
66675 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
66676 * Returns the resolved symbol. If that fails, simply return the address.
66677@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
66678 mutex_unlock(&task->signal->cred_guard_mutex);
66679 }
66680
66681-#ifdef CONFIG_STACKTRACE
66682+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66683
66684 #define MAX_STACK_TRACE_DEPTH 64
66685
66686@@ -487,7 +511,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
66687 return 0;
66688 }
66689
66690-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66691+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66692 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66693 struct pid *pid, struct task_struct *task)
66694 {
66695@@ -517,7 +541,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
66696 /************************************************************************/
66697
66698 /* permission checks */
66699-static int proc_fd_access_allowed(struct inode *inode)
66700+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66701 {
66702 struct task_struct *task;
66703 int allowed = 0;
66704@@ -527,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66705 */
66706 task = get_proc_task(inode);
66707 if (task) {
66708- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66709+ if (log)
66710+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66711+ else
66712+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66713 put_task_struct(task);
66714 }
66715 return allowed;
66716@@ -558,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66717 struct task_struct *task,
66718 int hide_pid_min)
66719 {
66720+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66721+ return false;
66722+
66723+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66724+ rcu_read_lock();
66725+ {
66726+ const struct cred *tmpcred = current_cred();
66727+ const struct cred *cred = __task_cred(task);
66728+
66729+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66730+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66731+ || in_group_p(grsec_proc_gid)
66732+#endif
66733+ ) {
66734+ rcu_read_unlock();
66735+ return true;
66736+ }
66737+ }
66738+ rcu_read_unlock();
66739+
66740+ if (!pid->hide_pid)
66741+ return false;
66742+#endif
66743+
66744 if (pid->hide_pid < hide_pid_min)
66745 return true;
66746 if (in_group_p(pid->pid_gid))
66747 return true;
66748+
66749 return ptrace_may_access(task, PTRACE_MODE_READ);
66750 }
66751
66752@@ -579,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66753 put_task_struct(task);
66754
66755 if (!has_perms) {
66756+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66757+ {
66758+#else
66759 if (pid->hide_pid == 2) {
66760+#endif
66761 /*
66762 * Let's make getdents(), stat(), and open()
66763 * consistent with each other. If a process
66764@@ -640,6 +696,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66765 if (!task)
66766 return -ESRCH;
66767
66768+ if (gr_acl_handle_procpidmem(task)) {
66769+ put_task_struct(task);
66770+ return -EPERM;
66771+ }
66772+
66773 mm = mm_access(task, mode);
66774 put_task_struct(task);
66775
66776@@ -655,6 +716,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66777
66778 file->private_data = mm;
66779
66780+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66781+ file->f_version = current->exec_id;
66782+#endif
66783+
66784 return 0;
66785 }
66786
66787@@ -676,6 +741,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66788 ssize_t copied;
66789 char *page;
66790
66791+#ifdef CONFIG_GRKERNSEC
66792+ if (write)
66793+ return -EPERM;
66794+#endif
66795+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66796+ if (file->f_version != current->exec_id) {
66797+ gr_log_badprocpid("mem");
66798+ return 0;
66799+ }
66800+#endif
66801+
66802 if (!mm)
66803 return 0;
66804
66805@@ -688,7 +764,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66806 goto free;
66807
66808 while (count > 0) {
66809- int this_len = min_t(int, count, PAGE_SIZE);
66810+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66811
66812 if (write && copy_from_user(page, buf, this_len)) {
66813 copied = -EFAULT;
66814@@ -780,6 +856,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66815 if (!mm)
66816 return 0;
66817
66818+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66819+ if (file->f_version != current->exec_id) {
66820+ gr_log_badprocpid("environ");
66821+ return 0;
66822+ }
66823+#endif
66824+
66825 page = (char *)__get_free_page(GFP_TEMPORARY);
66826 if (!page)
66827 return -ENOMEM;
66828@@ -789,7 +872,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66829 goto free;
66830 while (count > 0) {
66831 size_t this_len, max_len;
66832- int retval;
66833+ ssize_t retval;
66834
66835 if (src >= (mm->env_end - mm->env_start))
66836 break;
66837@@ -1403,7 +1486,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66838 int error = -EACCES;
66839
66840 /* Are we allowed to snoop on the tasks file descriptors? */
66841- if (!proc_fd_access_allowed(inode))
66842+ if (!proc_fd_access_allowed(inode, 0))
66843 goto out;
66844
66845 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66846@@ -1447,8 +1530,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66847 struct path path;
66848
66849 /* Are we allowed to snoop on the tasks file descriptors? */
66850- if (!proc_fd_access_allowed(inode))
66851- goto out;
66852+ /* logging this is needed for learning on chromium to work properly,
66853+ but we don't want to flood the logs from 'ps' which does a readlink
66854+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
66855+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
66856+ */
66857+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66858+ if (!proc_fd_access_allowed(inode,0))
66859+ goto out;
66860+ } else {
66861+ if (!proc_fd_access_allowed(inode,1))
66862+ goto out;
66863+ }
66864
66865 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66866 if (error)
66867@@ -1498,7 +1591,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66868 rcu_read_lock();
66869 cred = __task_cred(task);
66870 inode->i_uid = cred->euid;
66871+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66872+ inode->i_gid = grsec_proc_gid;
66873+#else
66874 inode->i_gid = cred->egid;
66875+#endif
66876 rcu_read_unlock();
66877 }
66878 security_task_to_inode(task, inode);
66879@@ -1534,10 +1631,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66880 return -ENOENT;
66881 }
66882 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66883+#ifdef CONFIG_GRKERNSEC_PROC_USER
66884+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66885+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66886+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66887+#endif
66888 task_dumpable(task)) {
66889 cred = __task_cred(task);
66890 stat->uid = cred->euid;
66891+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66892+ stat->gid = grsec_proc_gid;
66893+#else
66894 stat->gid = cred->egid;
66895+#endif
66896 }
66897 }
66898 rcu_read_unlock();
66899@@ -1575,11 +1681,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66900
66901 if (task) {
66902 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66903+#ifdef CONFIG_GRKERNSEC_PROC_USER
66904+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66905+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66906+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66907+#endif
66908 task_dumpable(task)) {
66909 rcu_read_lock();
66910 cred = __task_cred(task);
66911 inode->i_uid = cred->euid;
66912+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66913+ inode->i_gid = grsec_proc_gid;
66914+#else
66915 inode->i_gid = cred->egid;
66916+#endif
66917 rcu_read_unlock();
66918 } else {
66919 inode->i_uid = GLOBAL_ROOT_UID;
66920@@ -2114,6 +2229,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66921 if (!task)
66922 goto out_no_task;
66923
66924+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66925+ goto out;
66926+
66927 /*
66928 * Yes, it does not scale. And it should not. Don't add
66929 * new entries into /proc/<tgid>/ without very good reasons.
66930@@ -2144,6 +2262,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66931 if (!task)
66932 return -ENOENT;
66933
66934+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66935+ goto out;
66936+
66937 if (!dir_emit_dots(file, ctx))
66938 goto out;
66939
66940@@ -2535,7 +2656,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66941 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66942 #endif
66943 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66944-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66945+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66946 ONE("syscall", S_IRUSR, proc_pid_syscall),
66947 #endif
66948 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66949@@ -2560,10 +2681,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66950 #ifdef CONFIG_SECURITY
66951 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66952 #endif
66953-#ifdef CONFIG_KALLSYMS
66954+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66955 ONE("wchan", S_IRUGO, proc_pid_wchan),
66956 #endif
66957-#ifdef CONFIG_STACKTRACE
66958+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66959 ONE("stack", S_IRUSR, proc_pid_stack),
66960 #endif
66961 #ifdef CONFIG_SCHEDSTATS
66962@@ -2597,6 +2718,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66963 #ifdef CONFIG_HARDWALL
66964 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66965 #endif
66966+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66967+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66968+#endif
66969 #ifdef CONFIG_USER_NS
66970 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66971 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66972@@ -2727,7 +2851,14 @@ static int proc_pid_instantiate(struct inode *dir,
66973 if (!inode)
66974 goto out;
66975
66976+#ifdef CONFIG_GRKERNSEC_PROC_USER
66977+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66978+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66979+ inode->i_gid = grsec_proc_gid;
66980+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66981+#else
66982 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66983+#endif
66984 inode->i_op = &proc_tgid_base_inode_operations;
66985 inode->i_fop = &proc_tgid_base_operations;
66986 inode->i_flags|=S_IMMUTABLE;
66987@@ -2765,7 +2896,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66988 if (!task)
66989 goto out;
66990
66991+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66992+ goto out_put_task;
66993+
66994 result = proc_pid_instantiate(dir, dentry, task, NULL);
66995+out_put_task:
66996 put_task_struct(task);
66997 out:
66998 return ERR_PTR(result);
66999@@ -2879,7 +3014,7 @@ static const struct pid_entry tid_base_stuff[] = {
67000 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
67001 #endif
67002 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
67003-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
67004+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
67005 ONE("syscall", S_IRUSR, proc_pid_syscall),
67006 #endif
67007 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
67008@@ -2906,10 +3041,10 @@ static const struct pid_entry tid_base_stuff[] = {
67009 #ifdef CONFIG_SECURITY
67010 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
67011 #endif
67012-#ifdef CONFIG_KALLSYMS
67013+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67014 ONE("wchan", S_IRUGO, proc_pid_wchan),
67015 #endif
67016-#ifdef CONFIG_STACKTRACE
67017+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67018 ONE("stack", S_IRUSR, proc_pid_stack),
67019 #endif
67020 #ifdef CONFIG_SCHEDSTATS
67021diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
67022index cbd82df..c0407d2 100644
67023--- a/fs/proc/cmdline.c
67024+++ b/fs/proc/cmdline.c
67025@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
67026
67027 static int __init proc_cmdline_init(void)
67028 {
67029+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67030+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
67031+#else
67032 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
67033+#endif
67034 return 0;
67035 }
67036 fs_initcall(proc_cmdline_init);
67037diff --git a/fs/proc/devices.c b/fs/proc/devices.c
67038index 50493ed..248166b 100644
67039--- a/fs/proc/devices.c
67040+++ b/fs/proc/devices.c
67041@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
67042
67043 static int __init proc_devices_init(void)
67044 {
67045+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67046+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
67047+#else
67048 proc_create("devices", 0, NULL, &proc_devinfo_operations);
67049+#endif
67050 return 0;
67051 }
67052 fs_initcall(proc_devices_init);
67053diff --git a/fs/proc/fd.c b/fs/proc/fd.c
67054index 955bb55..71948bd 100644
67055--- a/fs/proc/fd.c
67056+++ b/fs/proc/fd.c
67057@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
67058 if (!task)
67059 return -ENOENT;
67060
67061- files = get_files_struct(task);
67062+ if (!gr_acl_handle_procpidmem(task))
67063+ files = get_files_struct(task);
67064 put_task_struct(task);
67065
67066 if (files) {
67067@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
67068 */
67069 int proc_fd_permission(struct inode *inode, int mask)
67070 {
67071+ struct task_struct *task;
67072 int rv = generic_permission(inode, mask);
67073- if (rv == 0)
67074- return 0;
67075+
67076 if (task_tgid(current) == proc_pid(inode))
67077 rv = 0;
67078+
67079+ task = get_proc_task(inode);
67080+ if (task == NULL)
67081+ return rv;
67082+
67083+ if (gr_acl_handle_procpidmem(task))
67084+ rv = -EACCES;
67085+
67086+ put_task_struct(task);
67087+
67088 return rv;
67089 }
67090
67091diff --git a/fs/proc/generic.c b/fs/proc/generic.c
67092index 317b726..e329aed 100644
67093--- a/fs/proc/generic.c
67094+++ b/fs/proc/generic.c
67095@@ -23,6 +23,7 @@
67096 #include <linux/bitops.h>
67097 #include <linux/spinlock.h>
67098 #include <linux/completion.h>
67099+#include <linux/grsecurity.h>
67100 #include <asm/uaccess.h>
67101
67102 #include "internal.h"
67103@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
67104 return proc_lookup_de(PDE(dir), dir, dentry);
67105 }
67106
67107+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
67108+ unsigned int flags)
67109+{
67110+ if (gr_proc_is_restricted())
67111+ return ERR_PTR(-EACCES);
67112+
67113+ return proc_lookup_de(PDE(dir), dir, dentry);
67114+}
67115+
67116 /*
67117 * This returns non-zero if at EOF, so that the /proc
67118 * root directory can use this and check if it should
67119@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
67120 return proc_readdir_de(PDE(inode), file, ctx);
67121 }
67122
67123+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
67124+{
67125+ struct inode *inode = file_inode(file);
67126+
67127+ if (gr_proc_is_restricted())
67128+ return -EACCES;
67129+
67130+ return proc_readdir_de(PDE(inode), file, ctx);
67131+}
67132+
67133 /*
67134 * These are the generic /proc directory operations. They
67135 * use the in-memory "struct proc_dir_entry" tree to parse
67136@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
67137 .iterate = proc_readdir,
67138 };
67139
67140+static const struct file_operations proc_dir_restricted_operations = {
67141+ .llseek = generic_file_llseek,
67142+ .read = generic_read_dir,
67143+ .iterate = proc_readdir_restrict,
67144+};
67145+
67146 /*
67147 * proc directories can do almost nothing..
67148 */
67149@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
67150 .setattr = proc_notify_change,
67151 };
67152
67153+static const struct inode_operations proc_dir_restricted_inode_operations = {
67154+ .lookup = proc_lookup_restrict,
67155+ .getattr = proc_getattr,
67156+ .setattr = proc_notify_change,
67157+};
67158+
67159 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
67160 {
67161 struct proc_dir_entry *tmp;
67162@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
67163 return ret;
67164
67165 if (S_ISDIR(dp->mode)) {
67166- dp->proc_fops = &proc_dir_operations;
67167- dp->proc_iops = &proc_dir_inode_operations;
67168+ if (dp->restricted) {
67169+ dp->proc_fops = &proc_dir_restricted_operations;
67170+ dp->proc_iops = &proc_dir_restricted_inode_operations;
67171+ } else {
67172+ dp->proc_fops = &proc_dir_operations;
67173+ dp->proc_iops = &proc_dir_inode_operations;
67174+ }
67175 dir->nlink++;
67176 } else if (S_ISLNK(dp->mode)) {
67177 dp->proc_iops = &proc_link_inode_operations;
67178@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
67179 }
67180 EXPORT_SYMBOL_GPL(proc_mkdir_data);
67181
67182+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
67183+ struct proc_dir_entry *parent, void *data)
67184+{
67185+ struct proc_dir_entry *ent;
67186+
67187+ if (mode == 0)
67188+ mode = S_IRUGO | S_IXUGO;
67189+
67190+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
67191+ if (ent) {
67192+ ent->data = data;
67193+ ent->restricted = 1;
67194+ if (proc_register(parent, ent) < 0) {
67195+ kfree(ent);
67196+ ent = NULL;
67197+ }
67198+ }
67199+ return ent;
67200+}
67201+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
67202+
67203 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
67204 struct proc_dir_entry *parent)
67205 {
67206@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
67207 }
67208 EXPORT_SYMBOL(proc_mkdir);
67209
67210+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
67211+ struct proc_dir_entry *parent)
67212+{
67213+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
67214+}
67215+EXPORT_SYMBOL(proc_mkdir_restrict);
67216+
67217 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
67218 struct proc_dir_entry *parent,
67219 const struct file_operations *proc_fops,
67220diff --git a/fs/proc/inode.c b/fs/proc/inode.c
67221index 333080d..0a35ec4 100644
67222--- a/fs/proc/inode.c
67223+++ b/fs/proc/inode.c
67224@@ -23,11 +23,17 @@
67225 #include <linux/slab.h>
67226 #include <linux/mount.h>
67227 #include <linux/magic.h>
67228+#include <linux/grsecurity.h>
67229
67230 #include <asm/uaccess.h>
67231
67232 #include "internal.h"
67233
67234+#ifdef CONFIG_PROC_SYSCTL
67235+extern const struct inode_operations proc_sys_inode_operations;
67236+extern const struct inode_operations proc_sys_dir_operations;
67237+#endif
67238+
67239 static void proc_evict_inode(struct inode *inode)
67240 {
67241 struct proc_dir_entry *de;
67242@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
67243 ns = PROC_I(inode)->ns.ns;
67244 if (ns_ops && ns)
67245 ns_ops->put(ns);
67246+
67247+#ifdef CONFIG_PROC_SYSCTL
67248+ if (inode->i_op == &proc_sys_inode_operations ||
67249+ inode->i_op == &proc_sys_dir_operations)
67250+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
67251+#endif
67252+
67253 }
67254
67255 static struct kmem_cache * proc_inode_cachep;
67256@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
67257 if (de->mode) {
67258 inode->i_mode = de->mode;
67259 inode->i_uid = de->uid;
67260+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67261+ inode->i_gid = grsec_proc_gid;
67262+#else
67263 inode->i_gid = de->gid;
67264+#endif
67265 }
67266 if (de->size)
67267 inode->i_size = de->size;
67268diff --git a/fs/proc/internal.h b/fs/proc/internal.h
67269index 7da13e4..68d0981 100644
67270--- a/fs/proc/internal.h
67271+++ b/fs/proc/internal.h
67272@@ -46,9 +46,10 @@ struct proc_dir_entry {
67273 struct completion *pde_unload_completion;
67274 struct list_head pde_openers; /* who did ->open, but not ->release */
67275 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
67276+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
67277 u8 namelen;
67278 char name[];
67279-};
67280+} __randomize_layout;
67281
67282 union proc_op {
67283 int (*proc_get_link)(struct dentry *, struct path *);
67284@@ -66,7 +67,7 @@ struct proc_inode {
67285 struct ctl_table *sysctl_entry;
67286 struct proc_ns ns;
67287 struct inode vfs_inode;
67288-};
67289+} __randomize_layout;
67290
67291 /*
67292 * General functions
67293@@ -154,6 +155,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
67294 struct pid *, struct task_struct *);
67295 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
67296 struct pid *, struct task_struct *);
67297+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
67298+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
67299+ struct pid *, struct task_struct *);
67300+#endif
67301
67302 /*
67303 * base.c
67304@@ -178,9 +183,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
67305 * generic.c
67306 */
67307 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
67308+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
67309 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
67310 struct dentry *);
67311 extern int proc_readdir(struct file *, struct dir_context *);
67312+extern int proc_readdir_restrict(struct file *, struct dir_context *);
67313 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
67314
67315 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
67316diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
67317index a352d57..cb94a5c 100644
67318--- a/fs/proc/interrupts.c
67319+++ b/fs/proc/interrupts.c
67320@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
67321
67322 static int __init proc_interrupts_init(void)
67323 {
67324+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67325+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
67326+#else
67327 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
67328+#endif
67329 return 0;
67330 }
67331 fs_initcall(proc_interrupts_init);
67332diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
67333index 6df8d07..3321060 100644
67334--- a/fs/proc/kcore.c
67335+++ b/fs/proc/kcore.c
67336@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67337 * the addresses in the elf_phdr on our list.
67338 */
67339 start = kc_offset_to_vaddr(*fpos - elf_buflen);
67340- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
67341+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
67342+ if (tsz > buflen)
67343 tsz = buflen;
67344-
67345+
67346 while (buflen) {
67347 struct kcore_list *m;
67348
67349@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67350 kfree(elf_buf);
67351 } else {
67352 if (kern_addr_valid(start)) {
67353- unsigned long n;
67354+ char *elf_buf;
67355+ mm_segment_t oldfs;
67356
67357- n = copy_to_user(buffer, (char *)start, tsz);
67358- /*
67359- * We cannot distinguish between fault on source
67360- * and fault on destination. When this happens
67361- * we clear too and hope it will trigger the
67362- * EFAULT again.
67363- */
67364- if (n) {
67365- if (clear_user(buffer + tsz - n,
67366- n))
67367+ elf_buf = kmalloc(tsz, GFP_KERNEL);
67368+ if (!elf_buf)
67369+ return -ENOMEM;
67370+ oldfs = get_fs();
67371+ set_fs(KERNEL_DS);
67372+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
67373+ set_fs(oldfs);
67374+ if (copy_to_user(buffer, elf_buf, tsz)) {
67375+ kfree(elf_buf);
67376 return -EFAULT;
67377+ }
67378 }
67379+ set_fs(oldfs);
67380+ kfree(elf_buf);
67381 } else {
67382 if (clear_user(buffer, tsz))
67383 return -EFAULT;
67384@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
67385
67386 static int open_kcore(struct inode *inode, struct file *filp)
67387 {
67388+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
67389+ return -EPERM;
67390+#endif
67391 if (!capable(CAP_SYS_RAWIO))
67392 return -EPERM;
67393 if (kcore_need_update)
67394diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
67395index aa1eee0..03dda72 100644
67396--- a/fs/proc/meminfo.c
67397+++ b/fs/proc/meminfo.c
67398@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
67399 vmi.used >> 10,
67400 vmi.largest_chunk >> 10
67401 #ifdef CONFIG_MEMORY_FAILURE
67402- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67403+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
67404 #endif
67405 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
67406 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
67407diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
67408index d4a3574..b421ce9 100644
67409--- a/fs/proc/nommu.c
67410+++ b/fs/proc/nommu.c
67411@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
67412
67413 if (file) {
67414 seq_pad(m, ' ');
67415- seq_path(m, &file->f_path, "");
67416+ seq_path(m, &file->f_path, "\n\\");
67417 }
67418
67419 seq_putc(m, '\n');
67420diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
67421index a63af3e..b4f262a 100644
67422--- a/fs/proc/proc_net.c
67423+++ b/fs/proc/proc_net.c
67424@@ -23,9 +23,27 @@
67425 #include <linux/nsproxy.h>
67426 #include <net/net_namespace.h>
67427 #include <linux/seq_file.h>
67428+#include <linux/grsecurity.h>
67429
67430 #include "internal.h"
67431
67432+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67433+static struct seq_operations *ipv6_seq_ops_addr;
67434+
67435+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
67436+{
67437+ ipv6_seq_ops_addr = addr;
67438+}
67439+
67440+void unregister_ipv6_seq_ops_addr(void)
67441+{
67442+ ipv6_seq_ops_addr = NULL;
67443+}
67444+
67445+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
67446+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
67447+#endif
67448+
67449 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
67450 {
67451 return pde->parent->data;
67452@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
67453 return maybe_get_net(PDE_NET(PDE(inode)));
67454 }
67455
67456+extern const struct seq_operations dev_seq_ops;
67457+
67458 int seq_open_net(struct inode *ino, struct file *f,
67459 const struct seq_operations *ops, int size)
67460 {
67461@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
67462
67463 BUG_ON(size < sizeof(*p));
67464
67465+ /* only permit access to /proc/net/dev */
67466+ if (
67467+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
67468+ ops != ipv6_seq_ops_addr &&
67469+#endif
67470+ ops != &dev_seq_ops && gr_proc_is_restricted())
67471+ return -EACCES;
67472+
67473 net = get_proc_net(ino);
67474 if (net == NULL)
67475 return -ENXIO;
67476@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
67477 int err;
67478 struct net *net;
67479
67480+ if (gr_proc_is_restricted())
67481+ return -EACCES;
67482+
67483 err = -ENXIO;
67484 net = get_proc_net(inode);
67485 if (net == NULL)
67486diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
67487index f92d5dd..26398ac 100644
67488--- a/fs/proc/proc_sysctl.c
67489+++ b/fs/proc/proc_sysctl.c
67490@@ -11,13 +11,21 @@
67491 #include <linux/namei.h>
67492 #include <linux/mm.h>
67493 #include <linux/module.h>
67494+#include <linux/nsproxy.h>
67495+#ifdef CONFIG_GRKERNSEC
67496+#include <net/net_namespace.h>
67497+#endif
67498 #include "internal.h"
67499
67500+extern int gr_handle_chroot_sysctl(const int op);
67501+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
67502+ const int op);
67503+
67504 static const struct dentry_operations proc_sys_dentry_operations;
67505 static const struct file_operations proc_sys_file_operations;
67506-static const struct inode_operations proc_sys_inode_operations;
67507+const struct inode_operations proc_sys_inode_operations;
67508 static const struct file_operations proc_sys_dir_file_operations;
67509-static const struct inode_operations proc_sys_dir_operations;
67510+const struct inode_operations proc_sys_dir_operations;
67511
67512 void proc_sys_poll_notify(struct ctl_table_poll *poll)
67513 {
67514@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
67515
67516 err = NULL;
67517 d_set_d_op(dentry, &proc_sys_dentry_operations);
67518+
67519+ gr_handle_proc_create(dentry, inode);
67520+
67521 d_add(dentry, inode);
67522
67523 out:
67524@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67525 struct inode *inode = file_inode(filp);
67526 struct ctl_table_header *head = grab_header(inode);
67527 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
67528+ int op = write ? MAY_WRITE : MAY_READ;
67529 ssize_t error;
67530 size_t res;
67531
67532@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67533 * and won't be until we finish.
67534 */
67535 error = -EPERM;
67536- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
67537+ if (sysctl_perm(head, table, op))
67538 goto out;
67539
67540 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
67541@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
67542 if (!table->proc_handler)
67543 goto out;
67544
67545+#ifdef CONFIG_GRKERNSEC
67546+ error = -EPERM;
67547+ if (gr_handle_chroot_sysctl(op))
67548+ goto out;
67549+ dget(filp->f_path.dentry);
67550+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
67551+ dput(filp->f_path.dentry);
67552+ goto out;
67553+ }
67554+ dput(filp->f_path.dentry);
67555+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
67556+ goto out;
67557+ if (write) {
67558+ if (current->nsproxy->net_ns != table->extra2) {
67559+ if (!capable(CAP_SYS_ADMIN))
67560+ goto out;
67561+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
67562+ goto out;
67563+ }
67564+#endif
67565+
67566 /* careful: calling conventions are nasty here */
67567 res = count;
67568 error = table->proc_handler(table, write, buf, &res, ppos);
67569@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
67570 return false;
67571 } else {
67572 d_set_d_op(child, &proc_sys_dentry_operations);
67573+
67574+ gr_handle_proc_create(child, inode);
67575+
67576 d_add(child, inode);
67577 }
67578 } else {
67579@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
67580 if ((*pos)++ < ctx->pos)
67581 return true;
67582
67583+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
67584+ return 0;
67585+
67586 if (unlikely(S_ISLNK(table->mode)))
67587 res = proc_sys_link_fill_cache(file, ctx, head, table);
67588 else
67589@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
67590 if (IS_ERR(head))
67591 return PTR_ERR(head);
67592
67593+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
67594+ return -ENOENT;
67595+
67596 generic_fillattr(inode, stat);
67597 if (table)
67598 stat->mode = (stat->mode & S_IFMT) | table->mode;
67599@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
67600 .llseek = generic_file_llseek,
67601 };
67602
67603-static const struct inode_operations proc_sys_inode_operations = {
67604+const struct inode_operations proc_sys_inode_operations = {
67605 .permission = proc_sys_permission,
67606 .setattr = proc_sys_setattr,
67607 .getattr = proc_sys_getattr,
67608 };
67609
67610-static const struct inode_operations proc_sys_dir_operations = {
67611+const struct inode_operations proc_sys_dir_operations = {
67612 .lookup = proc_sys_lookup,
67613 .permission = proc_sys_permission,
67614 .setattr = proc_sys_setattr,
67615@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
67616 static struct ctl_dir *new_dir(struct ctl_table_set *set,
67617 const char *name, int namelen)
67618 {
67619- struct ctl_table *table;
67620+ ctl_table_no_const *table;
67621 struct ctl_dir *new;
67622 struct ctl_node *node;
67623 char *new_name;
67624@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
67625 return NULL;
67626
67627 node = (struct ctl_node *)(new + 1);
67628- table = (struct ctl_table *)(node + 1);
67629+ table = (ctl_table_no_const *)(node + 1);
67630 new_name = (char *)(table + 2);
67631 memcpy(new_name, name, namelen);
67632 new_name[namelen] = '\0';
67633@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
67634 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
67635 struct ctl_table_root *link_root)
67636 {
67637- struct ctl_table *link_table, *entry, *link;
67638+ ctl_table_no_const *link_table, *link;
67639+ struct ctl_table *entry;
67640 struct ctl_table_header *links;
67641 struct ctl_node *node;
67642 char *link_name;
67643@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
67644 return NULL;
67645
67646 node = (struct ctl_node *)(links + 1);
67647- link_table = (struct ctl_table *)(node + nr_entries);
67648+ link_table = (ctl_table_no_const *)(node + nr_entries);
67649 link_name = (char *)&link_table[nr_entries + 1];
67650
67651 for (link = link_table, entry = table; entry->procname; link++, entry++) {
67652@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67653 struct ctl_table_header ***subheader, struct ctl_table_set *set,
67654 struct ctl_table *table)
67655 {
67656- struct ctl_table *ctl_table_arg = NULL;
67657- struct ctl_table *entry, *files;
67658+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
67659+ struct ctl_table *entry;
67660 int nr_files = 0;
67661 int nr_dirs = 0;
67662 int err = -ENOMEM;
67663@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67664 nr_files++;
67665 }
67666
67667- files = table;
67668 /* If there are mixed files and directories we need a new table */
67669 if (nr_dirs && nr_files) {
67670- struct ctl_table *new;
67671+ ctl_table_no_const *new;
67672 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
67673 GFP_KERNEL);
67674 if (!files)
67675@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
67676 /* Register everything except a directory full of subdirectories */
67677 if (nr_files || !nr_dirs) {
67678 struct ctl_table_header *header;
67679- header = __register_sysctl_table(set, path, files);
67680+ header = __register_sysctl_table(set, path, files ? files : table);
67681 if (!header) {
67682 kfree(ctl_table_arg);
67683 goto out;
67684diff --git a/fs/proc/root.c b/fs/proc/root.c
67685index 094e44d..085a877 100644
67686--- a/fs/proc/root.c
67687+++ b/fs/proc/root.c
67688@@ -188,7 +188,15 @@ void __init proc_root_init(void)
67689 proc_mkdir("openprom", NULL);
67690 #endif
67691 proc_tty_init();
67692+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67693+#ifdef CONFIG_GRKERNSEC_PROC_USER
67694+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
67695+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67696+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
67697+#endif
67698+#else
67699 proc_mkdir("bus", NULL);
67700+#endif
67701 proc_sys_init();
67702 }
67703
67704diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67705index bf2d03f..f058f9c 100644
67706--- a/fs/proc/stat.c
67707+++ b/fs/proc/stat.c
67708@@ -11,6 +11,7 @@
67709 #include <linux/irqnr.h>
67710 #include <linux/cputime.h>
67711 #include <linux/tick.h>
67712+#include <linux/grsecurity.h>
67713
67714 #ifndef arch_irq_stat_cpu
67715 #define arch_irq_stat_cpu(cpu) 0
67716@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67717 u64 sum_softirq = 0;
67718 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67719 struct timespec boottime;
67720+ int unrestricted = 1;
67721+
67722+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67723+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67724+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67725+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67726+ && !in_group_p(grsec_proc_gid)
67727+#endif
67728+ )
67729+ unrestricted = 0;
67730+#endif
67731+#endif
67732
67733 user = nice = system = idle = iowait =
67734 irq = softirq = steal = 0;
67735@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67736 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67737 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67738 idle += get_idle_time(i);
67739- iowait += get_iowait_time(i);
67740- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67741- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67742- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67743- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67744- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67745- sum += kstat_cpu_irqs_sum(i);
67746- sum += arch_irq_stat_cpu(i);
67747+ if (unrestricted) {
67748+ iowait += get_iowait_time(i);
67749+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67750+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67751+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67752+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67753+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67754+ sum += kstat_cpu_irqs_sum(i);
67755+ sum += arch_irq_stat_cpu(i);
67756+ for (j = 0; j < NR_SOFTIRQS; j++) {
67757+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67758
67759- for (j = 0; j < NR_SOFTIRQS; j++) {
67760- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67761-
67762- per_softirq_sums[j] += softirq_stat;
67763- sum_softirq += softirq_stat;
67764+ per_softirq_sums[j] += softirq_stat;
67765+ sum_softirq += softirq_stat;
67766+ }
67767 }
67768 }
67769- sum += arch_irq_stat();
67770+ if (unrestricted)
67771+ sum += arch_irq_stat();
67772
67773 seq_puts(p, "cpu ");
67774 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67775@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67776 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67777 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67778 idle = get_idle_time(i);
67779- iowait = get_iowait_time(i);
67780- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67781- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67782- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67783- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67784- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67785+ if (unrestricted) {
67786+ iowait = get_iowait_time(i);
67787+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67788+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67789+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67790+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67791+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67792+ }
67793 seq_printf(p, "cpu%d", i);
67794 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67795 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67796@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67797
67798 /* sum again ? it could be updated? */
67799 for_each_irq_nr(j)
67800- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
67801+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
67802
67803 seq_printf(p,
67804 "\nctxt %llu\n"
67805@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67806 "processes %lu\n"
67807 "procs_running %lu\n"
67808 "procs_blocked %lu\n",
67809- nr_context_switches(),
67810+ unrestricted ? nr_context_switches() : 0ULL,
67811 (unsigned long)jif,
67812- total_forks,
67813- nr_running(),
67814- nr_iowait());
67815+ unrestricted ? total_forks : 0UL,
67816+ unrestricted ? nr_running() : 0UL,
67817+ unrestricted ? nr_iowait() : 0UL);
67818
67819 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67820
67821diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67822index c341568..75852a2 100644
67823--- a/fs/proc/task_mmu.c
67824+++ b/fs/proc/task_mmu.c
67825@@ -13,12 +13,19 @@
67826 #include <linux/swap.h>
67827 #include <linux/swapops.h>
67828 #include <linux/mmu_notifier.h>
67829+#include <linux/grsecurity.h>
67830
67831 #include <asm/elf.h>
67832 #include <asm/uaccess.h>
67833 #include <asm/tlbflush.h>
67834 #include "internal.h"
67835
67836+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67837+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67838+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67839+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67840+#endif
67841+
67842 void task_mem(struct seq_file *m, struct mm_struct *mm)
67843 {
67844 unsigned long data, text, lib, swap;
67845@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67846 "VmExe:\t%8lu kB\n"
67847 "VmLib:\t%8lu kB\n"
67848 "VmPTE:\t%8lu kB\n"
67849- "VmSwap:\t%8lu kB\n",
67850- hiwater_vm << (PAGE_SHIFT-10),
67851+ "VmSwap:\t%8lu kB\n"
67852+
67853+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67854+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67855+#endif
67856+
67857+ ,hiwater_vm << (PAGE_SHIFT-10),
67858 total_vm << (PAGE_SHIFT-10),
67859 mm->locked_vm << (PAGE_SHIFT-10),
67860 mm->pinned_vm << (PAGE_SHIFT-10),
67861@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67862 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67863 (PTRS_PER_PTE * sizeof(pte_t) *
67864 atomic_long_read(&mm->nr_ptes)) >> 10,
67865- swap << (PAGE_SHIFT-10));
67866+ swap << (PAGE_SHIFT-10)
67867+
67868+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67869+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67870+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67871+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67872+#else
67873+ , mm->context.user_cs_base
67874+ , mm->context.user_cs_limit
67875+#endif
67876+#endif
67877+
67878+ );
67879 }
67880
67881 unsigned long task_vsize(struct mm_struct *mm)
67882@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67883 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67884 }
67885
67886- /* We don't show the stack guard page in /proc/maps */
67887+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67888+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67889+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67890+#else
67891 start = vma->vm_start;
67892- if (stack_guard_page_start(vma, start))
67893- start += PAGE_SIZE;
67894 end = vma->vm_end;
67895- if (stack_guard_page_end(vma, end))
67896- end -= PAGE_SIZE;
67897+#endif
67898
67899 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67900 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67901@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67902 flags & VM_WRITE ? 'w' : '-',
67903 flags & VM_EXEC ? 'x' : '-',
67904 flags & VM_MAYSHARE ? 's' : 'p',
67905+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67906+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67907+#else
67908 pgoff,
67909+#endif
67910 MAJOR(dev), MINOR(dev), ino);
67911
67912 /*
67913@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67914 */
67915 if (file) {
67916 seq_pad(m, ' ');
67917- seq_path(m, &file->f_path, "\n");
67918+ seq_path(m, &file->f_path, "\n\\");
67919 goto done;
67920 }
67921
67922@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67923 * Thread stack in /proc/PID/task/TID/maps or
67924 * the main process stack.
67925 */
67926- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67927- vma->vm_end >= mm->start_stack)) {
67928+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67929+ (vma->vm_start <= mm->start_stack &&
67930+ vma->vm_end >= mm->start_stack)) {
67931 name = "[stack]";
67932 } else {
67933 /* Thread stack in /proc/PID/maps */
67934@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
67935 struct proc_maps_private *priv = m->private;
67936 struct task_struct *task = priv->task;
67937
67938+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67939+ if (current->exec_id != m->exec_id) {
67940+ gr_log_badprocpid("maps");
67941+ return 0;
67942+ }
67943+#endif
67944+
67945 show_map_vma(m, vma, is_pid);
67946
67947 if (m->count < m->size) /* vma is copied successfully */
67948@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67949 .private = &mss,
67950 };
67951
67952+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67953+ if (current->exec_id != m->exec_id) {
67954+ gr_log_badprocpid("smaps");
67955+ return 0;
67956+ }
67957+#endif
67958 memset(&mss, 0, sizeof mss);
67959- mss.vma = vma;
67960- /* mmap_sem is held in m_start */
67961- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67962- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67963-
67964+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67965+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67966+#endif
67967+ mss.vma = vma;
67968+ /* mmap_sem is held in m_start */
67969+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67970+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67971+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67972+ }
67973+#endif
67974 show_map_vma(m, vma, is_pid);
67975
67976 seq_printf(m,
67977@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67978 "KernelPageSize: %8lu kB\n"
67979 "MMUPageSize: %8lu kB\n"
67980 "Locked: %8lu kB\n",
67981+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67982+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67983+#else
67984 (vma->vm_end - vma->vm_start) >> 10,
67985+#endif
67986 mss.resident >> 10,
67987 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67988 mss.shared_clean >> 10,
67989@@ -1422,6 +1473,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67990 char buffer[64];
67991 int nid;
67992
67993+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67994+ if (current->exec_id != m->exec_id) {
67995+ gr_log_badprocpid("numa_maps");
67996+ return 0;
67997+ }
67998+#endif
67999+
68000 if (!mm)
68001 return 0;
68002
68003@@ -1439,11 +1497,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
68004 mpol_to_str(buffer, sizeof(buffer), pol);
68005 mpol_cond_put(pol);
68006
68007+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68008+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
68009+#else
68010 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
68011+#endif
68012
68013 if (file) {
68014 seq_puts(m, " file=");
68015- seq_path(m, &file->f_path, "\n\t= ");
68016+ seq_path(m, &file->f_path, "\n\t\\= ");
68017 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68018 seq_puts(m, " heap");
68019 } else {
68020diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
68021index 678455d..ebd3245 100644
68022--- a/fs/proc/task_nommu.c
68023+++ b/fs/proc/task_nommu.c
68024@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
68025 else
68026 bytes += kobjsize(mm);
68027
68028- if (current->fs && current->fs->users > 1)
68029+ if (current->fs && atomic_read(&current->fs->users) > 1)
68030 sbytes += kobjsize(current->fs);
68031 else
68032 bytes += kobjsize(current->fs);
68033@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
68034
68035 if (file) {
68036 seq_pad(m, ' ');
68037- seq_path(m, &file->f_path, "");
68038+ seq_path(m, &file->f_path, "\n\\");
68039 } else if (mm) {
68040 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
68041
68042diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
68043index a90d6d35..d08047c 100644
68044--- a/fs/proc/vmcore.c
68045+++ b/fs/proc/vmcore.c
68046@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
68047 nr_bytes = count;
68048
68049 /* If pfn is not ram, return zeros for sparse dump files */
68050- if (pfn_is_ram(pfn) == 0)
68051- memset(buf, 0, nr_bytes);
68052- else {
68053+ if (pfn_is_ram(pfn) == 0) {
68054+ if (userbuf) {
68055+ if (clear_user((char __force_user *)buf, nr_bytes))
68056+ return -EFAULT;
68057+ } else
68058+ memset(buf, 0, nr_bytes);
68059+ } else {
68060 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
68061 offset, userbuf);
68062 if (tmp < 0)
68063@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
68064 static int copy_to(void *target, void *src, size_t size, int userbuf)
68065 {
68066 if (userbuf) {
68067- if (copy_to_user((char __user *) target, src, size))
68068+ if (copy_to_user((char __force_user *) target, src, size))
68069 return -EFAULT;
68070 } else {
68071 memcpy(target, src, size);
68072@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68073 if (*fpos < m->offset + m->size) {
68074 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
68075 start = m->paddr + *fpos - m->offset;
68076- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
68077+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
68078 if (tmp < 0)
68079 return tmp;
68080 buflen -= tsz;
68081@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
68082 static ssize_t read_vmcore(struct file *file, char __user *buffer,
68083 size_t buflen, loff_t *fpos)
68084 {
68085- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
68086+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
68087 }
68088
68089 /*
68090diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
68091index d3fb2b6..43a8140 100644
68092--- a/fs/qnx6/qnx6.h
68093+++ b/fs/qnx6/qnx6.h
68094@@ -74,7 +74,7 @@ enum {
68095 BYTESEX_BE,
68096 };
68097
68098-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68099+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
68100 {
68101 if (sbi->s_bytesex == BYTESEX_LE)
68102 return le64_to_cpu((__force __le64)n);
68103@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
68104 return (__force __fs64)cpu_to_be64(n);
68105 }
68106
68107-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68108+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
68109 {
68110 if (sbi->s_bytesex == BYTESEX_LE)
68111 return le32_to_cpu((__force __le32)n);
68112diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
68113index bb2869f..d34ada8 100644
68114--- a/fs/quota/netlink.c
68115+++ b/fs/quota/netlink.c
68116@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
68117 void quota_send_warning(struct kqid qid, dev_t dev,
68118 const char warntype)
68119 {
68120- static atomic_t seq;
68121+ static atomic_unchecked_t seq;
68122 struct sk_buff *skb;
68123 void *msg_head;
68124 int ret;
68125@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
68126 "VFS: Not enough memory to send quota warning.\n");
68127 return;
68128 }
68129- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
68130+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
68131 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
68132 if (!msg_head) {
68133 printk(KERN_ERR
68134diff --git a/fs/read_write.c b/fs/read_write.c
68135index 009d854..16ce214 100644
68136--- a/fs/read_write.c
68137+++ b/fs/read_write.c
68138@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
68139
68140 old_fs = get_fs();
68141 set_fs(get_ds());
68142- p = (__force const char __user *)buf;
68143+ p = (const char __force_user *)buf;
68144 if (count > MAX_RW_COUNT)
68145 count = MAX_RW_COUNT;
68146 if (file->f_op->write)
68147diff --git a/fs/readdir.c b/fs/readdir.c
68148index 33fd922..e0d6094 100644
68149--- a/fs/readdir.c
68150+++ b/fs/readdir.c
68151@@ -18,6 +18,7 @@
68152 #include <linux/security.h>
68153 #include <linux/syscalls.h>
68154 #include <linux/unistd.h>
68155+#include <linux/namei.h>
68156
68157 #include <asm/uaccess.h>
68158
68159@@ -71,6 +72,7 @@ struct old_linux_dirent {
68160 struct readdir_callback {
68161 struct dir_context ctx;
68162 struct old_linux_dirent __user * dirent;
68163+ struct file * file;
68164 int result;
68165 };
68166
68167@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
68168 buf->result = -EOVERFLOW;
68169 return -EOVERFLOW;
68170 }
68171+
68172+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68173+ return 0;
68174+
68175 buf->result++;
68176 dirent = buf->dirent;
68177 if (!access_ok(VERIFY_WRITE, dirent,
68178@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68179 if (!f.file)
68180 return -EBADF;
68181
68182+ buf.file = f.file;
68183 error = iterate_dir(f.file, &buf.ctx);
68184 if (buf.result)
68185 error = buf.result;
68186@@ -144,6 +151,7 @@ struct getdents_callback {
68187 struct dir_context ctx;
68188 struct linux_dirent __user * current_dir;
68189 struct linux_dirent __user * previous;
68190+ struct file * file;
68191 int count;
68192 int error;
68193 };
68194@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
68195 buf->error = -EOVERFLOW;
68196 return -EOVERFLOW;
68197 }
68198+
68199+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68200+ return 0;
68201+
68202 dirent = buf->previous;
68203 if (dirent) {
68204 if (__put_user(offset, &dirent->d_off))
68205@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
68206 if (!f.file)
68207 return -EBADF;
68208
68209+ buf.file = f.file;
68210 error = iterate_dir(f.file, &buf.ctx);
68211 if (error >= 0)
68212 error = buf.error;
68213@@ -228,6 +241,7 @@ struct getdents_callback64 {
68214 struct dir_context ctx;
68215 struct linux_dirent64 __user * current_dir;
68216 struct linux_dirent64 __user * previous;
68217+ struct file *file;
68218 int count;
68219 int error;
68220 };
68221@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
68222 buf->error = -EINVAL; /* only used if we fail.. */
68223 if (reclen > buf->count)
68224 return -EINVAL;
68225+
68226+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68227+ return 0;
68228+
68229 dirent = buf->previous;
68230 if (dirent) {
68231 if (__put_user(offset, &dirent->d_off))
68232@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68233 if (!f.file)
68234 return -EBADF;
68235
68236+ buf.file = f.file;
68237 error = iterate_dir(f.file, &buf.ctx);
68238 if (error >= 0)
68239 error = buf.error;
68240diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
68241index 9c02d96..6562c10 100644
68242--- a/fs/reiserfs/do_balan.c
68243+++ b/fs/reiserfs/do_balan.c
68244@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
68245 return;
68246 }
68247
68248- atomic_inc(&fs_generation(tb->tb_sb));
68249+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
68250 do_balance_starts(tb);
68251
68252 /*
68253diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
68254index aca73dd..e3c558d 100644
68255--- a/fs/reiserfs/item_ops.c
68256+++ b/fs/reiserfs/item_ops.c
68257@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
68258 }
68259
68260 static struct item_operations errcatch_ops = {
68261- errcatch_bytes_number,
68262- errcatch_decrement_key,
68263- errcatch_is_left_mergeable,
68264- errcatch_print_item,
68265- errcatch_check_item,
68266+ .bytes_number = errcatch_bytes_number,
68267+ .decrement_key = errcatch_decrement_key,
68268+ .is_left_mergeable = errcatch_is_left_mergeable,
68269+ .print_item = errcatch_print_item,
68270+ .check_item = errcatch_check_item,
68271
68272- errcatch_create_vi,
68273- errcatch_check_left,
68274- errcatch_check_right,
68275- errcatch_part_size,
68276- errcatch_unit_num,
68277- errcatch_print_vi
68278+ .create_vi = errcatch_create_vi,
68279+ .check_left = errcatch_check_left,
68280+ .check_right = errcatch_check_right,
68281+ .part_size = errcatch_part_size,
68282+ .unit_num = errcatch_unit_num,
68283+ .print_vi = errcatch_print_vi
68284 };
68285
68286 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
68287diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
68288index 621b9f3..af527fd 100644
68289--- a/fs/reiserfs/procfs.c
68290+++ b/fs/reiserfs/procfs.c
68291@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
68292 "SMALL_TAILS " : "NO_TAILS ",
68293 replay_only(sb) ? "REPLAY_ONLY " : "",
68294 convert_reiserfs(sb) ? "CONV " : "",
68295- atomic_read(&r->s_generation_counter),
68296+ atomic_read_unchecked(&r->s_generation_counter),
68297 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
68298 SF(s_do_balance), SF(s_unneeded_left_neighbor),
68299 SF(s_good_search_by_key_reada), SF(s_bmaps),
68300diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
68301index 735c2c2..81b91af 100644
68302--- a/fs/reiserfs/reiserfs.h
68303+++ b/fs/reiserfs/reiserfs.h
68304@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
68305 /* Comment? -Hans */
68306 wait_queue_head_t s_wait;
68307 /* increased by one every time the tree gets re-balanced */
68308- atomic_t s_generation_counter;
68309+ atomic_unchecked_t s_generation_counter;
68310
68311 /* File system properties. Currently holds on-disk FS format */
68312 unsigned long s_properties;
68313@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68314 #define REISERFS_USER_MEM 1 /* user memory mode */
68315
68316 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68317-#define get_generation(s) atomic_read (&fs_generation(s))
68318+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68319 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68320 #define __fs_changed(gen,s) (gen != get_generation (s))
68321 #define fs_changed(gen,s) \
68322diff --git a/fs/select.c b/fs/select.c
68323index 467bb1c..cf9d65a 100644
68324--- a/fs/select.c
68325+++ b/fs/select.c
68326@@ -20,6 +20,7 @@
68327 #include <linux/export.h>
68328 #include <linux/slab.h>
68329 #include <linux/poll.h>
68330+#include <linux/security.h>
68331 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
68332 #include <linux/file.h>
68333 #include <linux/fdtable.h>
68334@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
68335 struct poll_list *walk = head;
68336 unsigned long todo = nfds;
68337
68338+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
68339 if (nfds > rlimit(RLIMIT_NOFILE))
68340 return -EINVAL;
68341
68342diff --git a/fs/seq_file.c b/fs/seq_file.c
68343index 3857b72..0b7281e 100644
68344--- a/fs/seq_file.c
68345+++ b/fs/seq_file.c
68346@@ -12,6 +12,8 @@
68347 #include <linux/slab.h>
68348 #include <linux/cred.h>
68349 #include <linux/mm.h>
68350+#include <linux/sched.h>
68351+#include <linux/grsecurity.h>
68352
68353 #include <asm/uaccess.h>
68354 #include <asm/page.h>
68355@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
68356
68357 static void *seq_buf_alloc(unsigned long size)
68358 {
68359- void *buf;
68360-
68361- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
68362- if (!buf && size > PAGE_SIZE)
68363- buf = vmalloc(size);
68364- return buf;
68365+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
68366 }
68367
68368 /**
68369@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
68370 #ifdef CONFIG_USER_NS
68371 p->user_ns = file->f_cred->user_ns;
68372 #endif
68373+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68374+ p->exec_id = current->exec_id;
68375+#endif
68376
68377 /*
68378 * Wrappers around seq_open(e.g. swaps_open) need to be
68379@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
68380 }
68381 EXPORT_SYMBOL(seq_open);
68382
68383+
68384+int seq_open_restrict(struct file *file, const struct seq_operations *op)
68385+{
68386+ if (gr_proc_is_restricted())
68387+ return -EACCES;
68388+
68389+ return seq_open(file, op);
68390+}
68391+EXPORT_SYMBOL(seq_open_restrict);
68392+
68393 static int traverse(struct seq_file *m, loff_t offset)
68394 {
68395 loff_t pos = 0, index;
68396@@ -165,7 +175,7 @@ Eoverflow:
68397 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
68398 {
68399 struct seq_file *m = file->private_data;
68400- size_t copied = 0;
68401+ ssize_t copied = 0;
68402 loff_t pos;
68403 size_t n;
68404 void *p;
68405@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
68406 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
68407 void *data)
68408 {
68409- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
68410+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
68411 int res = -ENOMEM;
68412
68413 if (op) {
68414@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
68415 }
68416 EXPORT_SYMBOL(single_open_size);
68417
68418+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
68419+ void *data)
68420+{
68421+ if (gr_proc_is_restricted())
68422+ return -EACCES;
68423+
68424+ return single_open(file, show, data);
68425+}
68426+EXPORT_SYMBOL(single_open_restrict);
68427+
68428+
68429 int single_release(struct inode *inode, struct file *file)
68430 {
68431 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
68432diff --git a/fs/splice.c b/fs/splice.c
68433index f5cb9ba..8ddb1e9 100644
68434--- a/fs/splice.c
68435+++ b/fs/splice.c
68436@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68437 pipe_lock(pipe);
68438
68439 for (;;) {
68440- if (!pipe->readers) {
68441+ if (!atomic_read(&pipe->readers)) {
68442 send_sig(SIGPIPE, current, 0);
68443 if (!ret)
68444 ret = -EPIPE;
68445@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68446 page_nr++;
68447 ret += buf->len;
68448
68449- if (pipe->files)
68450+ if (atomic_read(&pipe->files))
68451 do_wakeup = 1;
68452
68453 if (!--spd->nr_pages)
68454@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
68455 do_wakeup = 0;
68456 }
68457
68458- pipe->waiting_writers++;
68459+ atomic_inc(&pipe->waiting_writers);
68460 pipe_wait(pipe);
68461- pipe->waiting_writers--;
68462+ atomic_dec(&pipe->waiting_writers);
68463 }
68464
68465 pipe_unlock(pipe);
68466@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
68467 old_fs = get_fs();
68468 set_fs(get_ds());
68469 /* The cast to a user pointer is valid due to the set_fs() */
68470- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
68471+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
68472 set_fs(old_fs);
68473
68474 return res;
68475@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
68476 old_fs = get_fs();
68477 set_fs(get_ds());
68478 /* The cast to a user pointer is valid due to the set_fs() */
68479- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
68480+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
68481 set_fs(old_fs);
68482
68483 return res;
68484@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
68485 goto err;
68486
68487 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
68488- vec[i].iov_base = (void __user *) page_address(page);
68489+ vec[i].iov_base = (void __force_user *) page_address(page);
68490 vec[i].iov_len = this_len;
68491 spd.pages[i] = page;
68492 spd.nr_pages++;
68493@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68494 ops->release(pipe, buf);
68495 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68496 pipe->nrbufs--;
68497- if (pipe->files)
68498+ if (atomic_read(&pipe->files))
68499 sd->need_wakeup = true;
68500 }
68501
68502@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
68503 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
68504 {
68505 while (!pipe->nrbufs) {
68506- if (!pipe->writers)
68507+ if (!atomic_read(&pipe->writers))
68508 return 0;
68509
68510- if (!pipe->waiting_writers && sd->num_spliced)
68511+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
68512 return 0;
68513
68514 if (sd->flags & SPLICE_F_NONBLOCK)
68515@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
68516 ops->release(pipe, buf);
68517 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
68518 pipe->nrbufs--;
68519- if (pipe->files)
68520+ if (atomic_read(&pipe->files))
68521 sd.need_wakeup = true;
68522 } else {
68523 buf->offset += ret;
68524@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
68525 * out of the pipe right after the splice_to_pipe(). So set
68526 * PIPE_READERS appropriately.
68527 */
68528- pipe->readers = 1;
68529+ atomic_set(&pipe->readers, 1);
68530
68531 current->splice_pipe = pipe;
68532 }
68533@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
68534
68535 partial[buffers].offset = off;
68536 partial[buffers].len = plen;
68537+ partial[buffers].private = 0;
68538
68539 off = 0;
68540 len -= plen;
68541@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68542 ret = -ERESTARTSYS;
68543 break;
68544 }
68545- if (!pipe->writers)
68546+ if (!atomic_read(&pipe->writers))
68547 break;
68548- if (!pipe->waiting_writers) {
68549+ if (!atomic_read(&pipe->waiting_writers)) {
68550 if (flags & SPLICE_F_NONBLOCK) {
68551 ret = -EAGAIN;
68552 break;
68553@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68554 pipe_lock(pipe);
68555
68556 while (pipe->nrbufs >= pipe->buffers) {
68557- if (!pipe->readers) {
68558+ if (!atomic_read(&pipe->readers)) {
68559 send_sig(SIGPIPE, current, 0);
68560 ret = -EPIPE;
68561 break;
68562@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
68563 ret = -ERESTARTSYS;
68564 break;
68565 }
68566- pipe->waiting_writers++;
68567+ atomic_inc(&pipe->waiting_writers);
68568 pipe_wait(pipe);
68569- pipe->waiting_writers--;
68570+ atomic_dec(&pipe->waiting_writers);
68571 }
68572
68573 pipe_unlock(pipe);
68574@@ -1817,14 +1818,14 @@ retry:
68575 pipe_double_lock(ipipe, opipe);
68576
68577 do {
68578- if (!opipe->readers) {
68579+ if (!atomic_read(&opipe->readers)) {
68580 send_sig(SIGPIPE, current, 0);
68581 if (!ret)
68582 ret = -EPIPE;
68583 break;
68584 }
68585
68586- if (!ipipe->nrbufs && !ipipe->writers)
68587+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
68588 break;
68589
68590 /*
68591@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68592 pipe_double_lock(ipipe, opipe);
68593
68594 do {
68595- if (!opipe->readers) {
68596+ if (!atomic_read(&opipe->readers)) {
68597 send_sig(SIGPIPE, current, 0);
68598 if (!ret)
68599 ret = -EPIPE;
68600@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
68601 * return EAGAIN if we have the potential of some data in the
68602 * future, otherwise just return 0
68603 */
68604- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
68605+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
68606 ret = -EAGAIN;
68607
68608 pipe_unlock(ipipe);
68609diff --git a/fs/stat.c b/fs/stat.c
68610index ae0c3ce..9ee641c 100644
68611--- a/fs/stat.c
68612+++ b/fs/stat.c
68613@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
68614 stat->gid = inode->i_gid;
68615 stat->rdev = inode->i_rdev;
68616 stat->size = i_size_read(inode);
68617- stat->atime = inode->i_atime;
68618- stat->mtime = inode->i_mtime;
68619+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68620+ stat->atime = inode->i_ctime;
68621+ stat->mtime = inode->i_ctime;
68622+ } else {
68623+ stat->atime = inode->i_atime;
68624+ stat->mtime = inode->i_mtime;
68625+ }
68626 stat->ctime = inode->i_ctime;
68627 stat->blksize = (1 << inode->i_blkbits);
68628 stat->blocks = inode->i_blocks;
68629@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
68630 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
68631 {
68632 struct inode *inode = path->dentry->d_inode;
68633+ int retval;
68634
68635- if (inode->i_op->getattr)
68636- return inode->i_op->getattr(path->mnt, path->dentry, stat);
68637+ if (inode->i_op->getattr) {
68638+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
68639+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
68640+ stat->atime = stat->ctime;
68641+ stat->mtime = stat->ctime;
68642+ }
68643+ return retval;
68644+ }
68645
68646 generic_fillattr(inode, stat);
68647 return 0;
68648diff --git a/fs/super.c b/fs/super.c
68649index b9a214d..6f8c954 100644
68650--- a/fs/super.c
68651+++ b/fs/super.c
68652@@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
68653 inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
68654 dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
68655 total_objects = dentries + inodes + fs_objects + 1;
68656+ if (!total_objects)
68657+ total_objects = 1;
68658
68659 /* proportion the scan between the caches */
68660 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
68661diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
68662index 0b45ff4..847de5b 100644
68663--- a/fs/sysfs/dir.c
68664+++ b/fs/sysfs/dir.c
68665@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
68666 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68667 {
68668 struct kernfs_node *parent, *kn;
68669+ const char *name;
68670+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
68671+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68672+ const char *parent_name;
68673+#endif
68674
68675 BUG_ON(!kobj);
68676
68677+ name = kobject_name(kobj);
68678+
68679 if (kobj->parent)
68680 parent = kobj->parent->sd;
68681 else
68682@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
68683 if (!parent)
68684 return -ENOENT;
68685
68686- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
68687- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
68688+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68689+ parent_name = parent->name;
68690+ mode = S_IRWXU;
68691+
68692+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
68693+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
68694+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
68695+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
68696+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
68697+#endif
68698+
68699+ kn = kernfs_create_dir_ns(parent, name,
68700+ mode, kobj, ns);
68701 if (IS_ERR(kn)) {
68702 if (PTR_ERR(kn) == -EEXIST)
68703- sysfs_warn_dup(parent, kobject_name(kobj));
68704+ sysfs_warn_dup(parent, name);
68705 return PTR_ERR(kn);
68706 }
68707
68708diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
68709index 69d4889..a810bd4 100644
68710--- a/fs/sysv/sysv.h
68711+++ b/fs/sysv/sysv.h
68712@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68713 #endif
68714 }
68715
68716-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68717+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68718 {
68719 if (sbi->s_bytesex == BYTESEX_PDP)
68720 return PDP_swab((__force __u32)n);
68721diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68722index fb08b0c..65fcc7e 100644
68723--- a/fs/ubifs/io.c
68724+++ b/fs/ubifs/io.c
68725@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68726 return err;
68727 }
68728
68729-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68730+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68731 {
68732 int err;
68733
68734diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68735index c175b4d..8f36a16 100644
68736--- a/fs/udf/misc.c
68737+++ b/fs/udf/misc.c
68738@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68739
68740 u8 udf_tag_checksum(const struct tag *t)
68741 {
68742- u8 *data = (u8 *)t;
68743+ const u8 *data = (const u8 *)t;
68744 u8 checksum = 0;
68745 int i;
68746 for (i = 0; i < sizeof(struct tag); ++i)
68747diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68748index 8d974c4..b82f6ec 100644
68749--- a/fs/ufs/swab.h
68750+++ b/fs/ufs/swab.h
68751@@ -22,7 +22,7 @@ enum {
68752 BYTESEX_BE
68753 };
68754
68755-static inline u64
68756+static inline u64 __intentional_overflow(-1)
68757 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68758 {
68759 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68760@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68761 return (__force __fs64)cpu_to_be64(n);
68762 }
68763
68764-static inline u32
68765+static inline u32 __intentional_overflow(-1)
68766 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68767 {
68768 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68769diff --git a/fs/utimes.c b/fs/utimes.c
68770index aa138d6..5f3a811 100644
68771--- a/fs/utimes.c
68772+++ b/fs/utimes.c
68773@@ -1,6 +1,7 @@
68774 #include <linux/compiler.h>
68775 #include <linux/file.h>
68776 #include <linux/fs.h>
68777+#include <linux/security.h>
68778 #include <linux/linkage.h>
68779 #include <linux/mount.h>
68780 #include <linux/namei.h>
68781@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68782 }
68783 }
68784 retry_deleg:
68785+
68786+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68787+ error = -EACCES;
68788+ goto mnt_drop_write_and_out;
68789+ }
68790+
68791 mutex_lock(&inode->i_mutex);
68792 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68793 mutex_unlock(&inode->i_mutex);
68794diff --git a/fs/xattr.c b/fs/xattr.c
68795index c69e6d4..cc56af5 100644
68796--- a/fs/xattr.c
68797+++ b/fs/xattr.c
68798@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68799 return rc;
68800 }
68801
68802+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68803+ssize_t
68804+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68805+{
68806+ struct inode *inode = dentry->d_inode;
68807+ ssize_t error;
68808+
68809+ error = inode_permission(inode, MAY_EXEC);
68810+ if (error)
68811+ return error;
68812+
68813+ if (inode->i_op->getxattr)
68814+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68815+ else
68816+ error = -EOPNOTSUPP;
68817+
68818+ return error;
68819+}
68820+EXPORT_SYMBOL(pax_getxattr);
68821+#endif
68822+
68823 ssize_t
68824 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68825 {
68826@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68827 * Extended attribute SET operations
68828 */
68829 static long
68830-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68831+setxattr(struct path *path, const char __user *name, const void __user *value,
68832 size_t size, int flags)
68833 {
68834 int error;
68835@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68836 posix_acl_fix_xattr_from_user(kvalue, size);
68837 }
68838
68839- error = vfs_setxattr(d, kname, kvalue, size, flags);
68840+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68841+ error = -EACCES;
68842+ goto out;
68843+ }
68844+
68845+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68846 out:
68847 if (vvalue)
68848 vfree(vvalue);
68849@@ -377,7 +403,7 @@ retry:
68850 return error;
68851 error = mnt_want_write(path.mnt);
68852 if (!error) {
68853- error = setxattr(path.dentry, name, value, size, flags);
68854+ error = setxattr(&path, name, value, size, flags);
68855 mnt_drop_write(path.mnt);
68856 }
68857 path_put(&path);
68858@@ -401,7 +427,7 @@ retry:
68859 return error;
68860 error = mnt_want_write(path.mnt);
68861 if (!error) {
68862- error = setxattr(path.dentry, name, value, size, flags);
68863+ error = setxattr(&path, name, value, size, flags);
68864 mnt_drop_write(path.mnt);
68865 }
68866 path_put(&path);
68867@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68868 const void __user *,value, size_t, size, int, flags)
68869 {
68870 struct fd f = fdget(fd);
68871- struct dentry *dentry;
68872 int error = -EBADF;
68873
68874 if (!f.file)
68875 return error;
68876- dentry = f.file->f_path.dentry;
68877- audit_inode(NULL, dentry, 0);
68878+ audit_inode(NULL, f.file->f_path.dentry, 0);
68879 error = mnt_want_write_file(f.file);
68880 if (!error) {
68881- error = setxattr(dentry, name, value, size, flags);
68882+ error = setxattr(&f.file->f_path, name, value, size, flags);
68883 mnt_drop_write_file(f.file);
68884 }
68885 fdput(f);
68886@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68887 * Extended attribute REMOVE operations
68888 */
68889 static long
68890-removexattr(struct dentry *d, const char __user *name)
68891+removexattr(struct path *path, const char __user *name)
68892 {
68893 int error;
68894 char kname[XATTR_NAME_MAX + 1];
68895@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
68896 if (error < 0)
68897 return error;
68898
68899- return vfs_removexattr(d, kname);
68900+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68901+ return -EACCES;
68902+
68903+ return vfs_removexattr(path->dentry, kname);
68904 }
68905
68906 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
68907@@ -652,7 +679,7 @@ retry:
68908 return error;
68909 error = mnt_want_write(path.mnt);
68910 if (!error) {
68911- error = removexattr(path.dentry, name);
68912+ error = removexattr(&path, name);
68913 mnt_drop_write(path.mnt);
68914 }
68915 path_put(&path);
68916@@ -675,7 +702,7 @@ retry:
68917 return error;
68918 error = mnt_want_write(path.mnt);
68919 if (!error) {
68920- error = removexattr(path.dentry, name);
68921+ error = removexattr(&path, name);
68922 mnt_drop_write(path.mnt);
68923 }
68924 path_put(&path);
68925@@ -689,16 +716,16 @@ retry:
68926 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68927 {
68928 struct fd f = fdget(fd);
68929- struct dentry *dentry;
68930+ struct path *path;
68931 int error = -EBADF;
68932
68933 if (!f.file)
68934 return error;
68935- dentry = f.file->f_path.dentry;
68936- audit_inode(NULL, dentry, 0);
68937+ path = &f.file->f_path;
68938+ audit_inode(NULL, path->dentry, 0);
68939 error = mnt_want_write_file(f.file);
68940 if (!error) {
68941- error = removexattr(dentry, name);
68942+ error = removexattr(path, name);
68943 mnt_drop_write_file(f.file);
68944 }
68945 fdput(f);
68946diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68947index 86df952..ac430d6 100644
68948--- a/fs/xfs/libxfs/xfs_bmap.c
68949+++ b/fs/xfs/libxfs/xfs_bmap.c
68950@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
68951
68952 #else
68953 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68954-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68955+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68956 #endif /* DEBUG */
68957
68958 /*
68959diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68960index f1b69ed..3d0222f 100644
68961--- a/fs/xfs/xfs_dir2_readdir.c
68962+++ b/fs/xfs/xfs_dir2_readdir.c
68963@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
68964 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68965 filetype = dp->d_ops->sf_get_ftype(sfep);
68966 ctx->pos = off & 0x7fffffff;
68967- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68968+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68969+ char name[sfep->namelen];
68970+ memcpy(name, sfep->name, sfep->namelen);
68971+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68972+ return 0;
68973+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68974 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68975 return 0;
68976 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68977diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68978index 3799695..0ddc953 100644
68979--- a/fs/xfs/xfs_ioctl.c
68980+++ b/fs/xfs/xfs_ioctl.c
68981@@ -122,7 +122,7 @@ xfs_find_handle(
68982 }
68983
68984 error = -EFAULT;
68985- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68986+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68987 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68988 goto out_put;
68989
68990diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68991new file mode 100644
68992index 0000000..f27264e
68993--- /dev/null
68994+++ b/grsecurity/Kconfig
68995@@ -0,0 +1,1166 @@
68996+#
68997+# grsecurity configuration
68998+#
68999+menu "Memory Protections"
69000+depends on GRKERNSEC
69001+
69002+config GRKERNSEC_KMEM
69003+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
69004+ default y if GRKERNSEC_CONFIG_AUTO
69005+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
69006+ help
69007+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
69008+ be written to or read from to modify or leak the contents of the running
69009+ kernel. /dev/port will also not be allowed to be opened, writing to
69010+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
69011+ If you have module support disabled, enabling this will close up several
69012+ ways that are currently used to insert malicious code into the running
69013+ kernel.
69014+
69015+ Even with this feature enabled, we still highly recommend that
69016+ you use the RBAC system, as it is still possible for an attacker to
69017+ modify the running kernel through other more obscure methods.
69018+
69019+ It is highly recommended that you say Y here if you meet all the
69020+ conditions above.
69021+
69022+config GRKERNSEC_VM86
69023+ bool "Restrict VM86 mode"
69024+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69025+ depends on X86_32
69026+
69027+ help
69028+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
69029+ make use of a special execution mode on 32bit x86 processors called
69030+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
69031+ video cards and will still work with this option enabled. The purpose
69032+ of the option is to prevent exploitation of emulation errors in
69033+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
69034+ Nearly all users should be able to enable this option.
69035+
69036+config GRKERNSEC_IO
69037+ bool "Disable privileged I/O"
69038+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69039+ depends on X86
69040+ select RTC_CLASS
69041+ select RTC_INTF_DEV
69042+ select RTC_DRV_CMOS
69043+
69044+ help
69045+ If you say Y here, all ioperm and iopl calls will return an error.
69046+ Ioperm and iopl can be used to modify the running kernel.
69047+ Unfortunately, some programs need this access to operate properly,
69048+ the most notable of which are XFree86 and hwclock. hwclock can be
69049+ remedied by having RTC support in the kernel, so real-time
69050+ clock support is enabled if this option is enabled, to ensure
69051+ that hwclock operates correctly. If hwclock still does not work,
69052+ either update udev or symlink /dev/rtc to /dev/rtc0.
69053+
69054+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
69055+ you may not be able to boot into a graphical environment with this
69056+ option enabled. In this case, you should use the RBAC system instead.
69057+
69058+config GRKERNSEC_BPF_HARDEN
69059+ bool "Harden BPF interpreter"
69060+ default y if GRKERNSEC_CONFIG_AUTO
69061+ help
69062+ Unlike previous versions of grsecurity that hardened both the BPF
69063+ interpreted code against corruption at rest as well as the JIT code
69064+ against JIT-spray attacks and attacker-controlled immediate values
69065+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
69066+ and will ensure the interpreted code is read-only at rest. This feature
69067+ may be removed at a later time when eBPF stabilizes to entirely revert
69068+ back to the more secure pre-3.16 BPF interpreter/JIT.
69069+
69070+ If you're using KERNEXEC, it's recommended that you enable this option
69071+ to supplement the hardening of the kernel.
69072+
69073+config GRKERNSEC_PERF_HARDEN
69074+ bool "Disable unprivileged PERF_EVENTS usage by default"
69075+ default y if GRKERNSEC_CONFIG_AUTO
69076+ depends on PERF_EVENTS
69077+ help
69078+ If you say Y here, the range of acceptable values for the
69079+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
69080+ default to a new value: 3. When the sysctl is set to this value, no
69081+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
69082+
69083+ Though PERF_EVENTS can be used legitimately for performance monitoring
69084+ and low-level application profiling, it is forced on regardless of
69085+ configuration, has been at fault for several vulnerabilities, and
69086+ creates new opportunities for side channels and other information leaks.
69087+
69088+ This feature puts PERF_EVENTS into a secure default state and permits
69089+ the administrator to change out of it temporarily if unprivileged
69090+ application profiling is needed.
69091+
69092+config GRKERNSEC_RAND_THREADSTACK
69093+ bool "Insert random gaps between thread stacks"
69094+ default y if GRKERNSEC_CONFIG_AUTO
69095+ depends on PAX_RANDMMAP && !PPC
69096+ help
69097+ If you say Y here, a random-sized gap will be enforced between allocated
69098+ thread stacks. Glibc's NPTL and other threading libraries that
69099+ pass MAP_STACK to the kernel for thread stack allocation are supported.
69100+ The implementation currently provides 8 bits of entropy for the gap.
69101+
69102+ Many distributions do not compile threaded remote services with the
69103+ -fstack-check argument to GCC, causing the variable-sized stack-based
69104+ allocator, alloca(), to not probe the stack on allocation. This
69105+ permits an unbounded alloca() to skip over any guard page and potentially
69106+ modify another thread's stack reliably. An enforced random gap
69107+ reduces the reliability of such an attack and increases the chance
69108+ that such a read/write to another thread's stack instead lands in
69109+ an unmapped area, causing a crash and triggering grsecurity's
69110+ anti-bruteforcing logic.
69111+
69112+config GRKERNSEC_PROC_MEMMAP
69113+ bool "Harden ASLR against information leaks and entropy reduction"
69114+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
69115+ depends on PAX_NOEXEC || PAX_ASLR
69116+ help
69117+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
69118+ give no information about the addresses of its mappings if
69119+ PaX features that rely on random addresses are enabled on the task.
69120+ In addition to sanitizing this information and disabling other
69121+ dangerous sources of information, this option causes reads of sensitive
69122+ /proc/<pid> entries where the file descriptor was opened in a different
69123+ task than the one performing the read. Such attempts are logged.
69124+ This option also limits argv/env strings for suid/sgid binaries
69125+ to 512KB to prevent a complete exhaustion of the stack entropy provided
69126+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
69127+ binaries to prevent alternative mmap layouts from being abused.
69128+
69129+ If you use PaX it is essential that you say Y here as it closes up
69130+ several holes that make full ASLR useless locally.
69131+
69132+
69133+config GRKERNSEC_KSTACKOVERFLOW
69134+ bool "Prevent kernel stack overflows"
69135+ default y if GRKERNSEC_CONFIG_AUTO
69136+ depends on !IA64 && 64BIT
69137+ help
69138+ If you say Y here, the kernel's process stacks will be allocated
69139+ with vmalloc instead of the kernel's default allocator. This
69140+ introduces guard pages that in combination with the alloca checking
69141+ of the STACKLEAK feature prevents all forms of kernel process stack
69142+ overflow abuse. Note that this is different from kernel stack
69143+ buffer overflows.
69144+
69145+config GRKERNSEC_BRUTE
69146+ bool "Deter exploit bruteforcing"
69147+ default y if GRKERNSEC_CONFIG_AUTO
69148+ help
69149+ If you say Y here, attempts to bruteforce exploits against forking
69150+ daemons such as apache or sshd, as well as against suid/sgid binaries
69151+ will be deterred. When a child of a forking daemon is killed by PaX
69152+ or crashes due to an illegal instruction or other suspicious signal,
69153+ the parent process will be delayed 30 seconds upon every subsequent
69154+ fork until the administrator is able to assess the situation and
69155+ restart the daemon.
69156+ In the suid/sgid case, the attempt is logged, the user has all their
69157+ existing instances of the suid/sgid binary terminated and will
69158+ be unable to execute any suid/sgid binaries for 15 minutes.
69159+
69160+ It is recommended that you also enable signal logging in the auditing
69161+ section so that logs are generated when a process triggers a suspicious
69162+ signal.
69163+ If the sysctl option is enabled, a sysctl option with name
69164+ "deter_bruteforce" is created.
69165+
69166+config GRKERNSEC_MODHARDEN
69167+ bool "Harden module auto-loading"
69168+ default y if GRKERNSEC_CONFIG_AUTO
69169+ depends on MODULES
69170+ help
69171+ If you say Y here, module auto-loading in response to use of some
69172+ feature implemented by an unloaded module will be restricted to
69173+ root users. Enabling this option helps defend against attacks
69174+ by unprivileged users who abuse the auto-loading behavior to
69175+ cause a vulnerable module to load that is then exploited.
69176+
69177+ If this option prevents a legitimate use of auto-loading for a
69178+ non-root user, the administrator can execute modprobe manually
69179+ with the exact name of the module mentioned in the alert log.
69180+ Alternatively, the administrator can add the module to the list
69181+ of modules loaded at boot by modifying init scripts.
69182+
69183+ Modification of init scripts will most likely be needed on
69184+ Ubuntu servers with encrypted home directory support enabled,
69185+ as the first non-root user logging in will cause the ecb(aes),
69186+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
69187+
69188+config GRKERNSEC_HIDESYM
69189+ bool "Hide kernel symbols"
69190+ default y if GRKERNSEC_CONFIG_AUTO
69191+ select PAX_USERCOPY_SLABS
69192+ help
69193+ If you say Y here, getting information on loaded modules, and
69194+ displaying all kernel symbols through a syscall will be restricted
69195+ to users with CAP_SYS_MODULE. For software compatibility reasons,
69196+ /proc/kallsyms will be restricted to the root user. The RBAC
69197+ system can hide that entry even from root.
69198+
69199+ This option also prevents leaking of kernel addresses through
69200+ several /proc entries.
69201+
69202+ Note that this option is only effective provided the following
69203+ conditions are met:
69204+ 1) The kernel using grsecurity is not precompiled by some distribution
69205+ 2) You have also enabled GRKERNSEC_DMESG
69206+ 3) You are using the RBAC system and hiding other files such as your
69207+ kernel image and System.map. Alternatively, enabling this option
69208+ causes the permissions on /boot, /lib/modules, and the kernel
69209+ source directory to change at compile time to prevent
69210+ reading by non-root users.
69211+ If the above conditions are met, this option will aid in providing a
69212+ useful protection against local kernel exploitation of overflows
69213+ and arbitrary read/write vulnerabilities.
69214+
69215+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
69216+ in addition to this feature.
69217+
69218+config GRKERNSEC_RANDSTRUCT
69219+ bool "Randomize layout of sensitive kernel structures"
69220+ default y if GRKERNSEC_CONFIG_AUTO
69221+ select GRKERNSEC_HIDESYM
69222+ select MODVERSIONS if MODULES
69223+ help
69224+ If you say Y here, the layouts of a number of sensitive kernel
69225+ structures (task, fs, cred, etc) and all structures composed entirely
69226+ of function pointers (aka "ops" structs) will be randomized at compile-time.
69227+ This can introduce the requirement of an additional infoleak
69228+ vulnerability for exploits targeting these structure types.
69229+
69230+ Enabling this feature will introduce some performance impact, slightly
69231+ increase memory usage, and prevent the use of forensic tools like
69232+ Volatility against the system (unless the kernel source tree isn't
69233+ cleaned after kernel installation).
69234+
69235+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
69236+ It remains after a make clean to allow for external modules to be compiled
69237+ with the existing seed and will be removed by a make mrproper or
69238+ make distclean.
69239+
 69240+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
69241+ to install the supporting headers explicitly in addition to the normal
69242+ gcc package.
69243+
69244+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
69245+ bool "Use cacheline-aware structure randomization"
69246+ depends on GRKERNSEC_RANDSTRUCT
69247+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
69248+ help
69249+ If you say Y here, the RANDSTRUCT randomization will make a best effort
69250+ at restricting randomization to cacheline-sized groups of elements. It
69251+ will further not randomize bitfields in structures. This reduces the
69252+ performance hit of RANDSTRUCT at the cost of weakened randomization.
69253+
69254+config GRKERNSEC_KERN_LOCKOUT
69255+ bool "Active kernel exploit response"
69256+ default y if GRKERNSEC_CONFIG_AUTO
69257+ depends on X86 || ARM || PPC || SPARC
69258+ help
69259+ If you say Y here, when a PaX alert is triggered due to suspicious
69260+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
69261+ or an OOPS occurs due to bad memory accesses, instead of just
69262+ terminating the offending process (and potentially allowing
69263+ a subsequent exploit from the same user), we will take one of two
69264+ actions:
69265+ If the user was root, we will panic the system
69266+ If the user was non-root, we will log the attempt, terminate
69267+ all processes owned by the user, then prevent them from creating
69268+ any new processes until the system is restarted
69269+ This deters repeated kernel exploitation/bruteforcing attempts
69270+ and is useful for later forensics.
69271+
69272+config GRKERNSEC_OLD_ARM_USERLAND
69273+ bool "Old ARM userland compatibility"
69274+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
69275+ help
69276+ If you say Y here, stubs of executable code to perform such operations
69277+ as "compare-exchange" will be placed at fixed locations in the ARM vector
69278+ table. This is unfortunately needed for old ARM userland meant to run
69279+ across a wide range of processors. Without this option enabled,
69280+ the get_tls and data memory barrier stubs will be emulated by the kernel,
69281+ which is enough for Linaro userlands or other userlands designed for v6
69282+ and newer ARM CPUs. It's recommended that you try without this option enabled
69283+ first, and only enable it if your userland does not boot (it will likely fail
69284+ at init time).
69285+
69286+endmenu
69287+menu "Role Based Access Control Options"
69288+depends on GRKERNSEC
69289+
69290+config GRKERNSEC_RBAC_DEBUG
69291+ bool
69292+
69293+config GRKERNSEC_NO_RBAC
69294+ bool "Disable RBAC system"
69295+ help
69296+ If you say Y here, the /dev/grsec device will be removed from the kernel,
69297+ preventing the RBAC system from being enabled. You should only say Y
69298+ here if you have no intention of using the RBAC system, so as to prevent
69299+ an attacker with root access from misusing the RBAC system to hide files
69300+ and processes when loadable module support and /dev/[k]mem have been
69301+ locked down.
69302+
69303+config GRKERNSEC_ACL_HIDEKERN
69304+ bool "Hide kernel processes"
69305+ help
69306+ If you say Y here, all kernel threads will be hidden to all
69307+ processes but those whose subject has the "view hidden processes"
69308+ flag.
69309+
69310+config GRKERNSEC_ACL_MAXTRIES
69311+ int "Maximum tries before password lockout"
69312+ default 3
69313+ help
69314+ This option enforces the maximum number of times a user can attempt
69315+ to authorize themselves with the grsecurity RBAC system before being
69316+ denied the ability to attempt authorization again for a specified time.
69317+ The lower the number, the harder it will be to brute-force a password.
69318+
69319+config GRKERNSEC_ACL_TIMEOUT
69320+ int "Time to wait after max password tries, in seconds"
69321+ default 30
69322+ help
69323+ This option specifies the time the user must wait after attempting to
69324+ authorize to the RBAC system with the maximum number of invalid
69325+ passwords. The higher the number, the harder it will be to brute-force
69326+ a password.
69327+
69328+endmenu
69329+menu "Filesystem Protections"
69330+depends on GRKERNSEC
69331+
69332+config GRKERNSEC_PROC
69333+ bool "Proc restrictions"
69334+ default y if GRKERNSEC_CONFIG_AUTO
69335+ help
69336+ If you say Y here, the permissions of the /proc filesystem
69337+ will be altered to enhance system security and privacy. You MUST
69338+ choose either a user only restriction or a user and group restriction.
69339+ Depending upon the option you choose, you can either restrict users to
69340+ see only the processes they themselves run, or choose a group that can
69341+ view all processes and files normally restricted to root if you choose
69342+ the "restrict to user only" option. NOTE: If you're running identd or
69343+ ntpd as a non-root user, you will have to run it as the group you
69344+ specify here.
69345+
69346+config GRKERNSEC_PROC_USER
69347+ bool "Restrict /proc to user only"
69348+ depends on GRKERNSEC_PROC
69349+ help
69350+ If you say Y here, non-root users will only be able to view their own
69351+ processes, and restricts them from viewing network-related information,
69352+ and viewing kernel symbol and module information.
69353+
69354+config GRKERNSEC_PROC_USERGROUP
69355+ bool "Allow special group"
69356+ default y if GRKERNSEC_CONFIG_AUTO
69357+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
69358+ help
69359+ If you say Y here, you will be able to select a group that will be
69360+ able to view all processes and network-related information. If you've
69361+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
69362+ remain hidden. This option is useful if you want to run identd as
69363+ a non-root user. The group you select may also be chosen at boot time
69364+ via "grsec_proc_gid=" on the kernel commandline.
69365+
69366+config GRKERNSEC_PROC_GID
69367+ int "GID for special group"
69368+ depends on GRKERNSEC_PROC_USERGROUP
69369+ default 1001
69370+
69371+config GRKERNSEC_PROC_ADD
69372+ bool "Additional restrictions"
69373+ default y if GRKERNSEC_CONFIG_AUTO
69374+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
69375+ help
69376+ If you say Y here, additional restrictions will be placed on
69377+ /proc that keep normal users from viewing device information and
69378+ slabinfo information that could be useful for exploits.
69379+
69380+config GRKERNSEC_LINK
69381+ bool "Linking restrictions"
69382+ default y if GRKERNSEC_CONFIG_AUTO
69383+ help
69384+ If you say Y here, /tmp race exploits will be prevented, since users
69385+ will no longer be able to follow symlinks owned by other users in
69386+ world-writable +t directories (e.g. /tmp), unless the owner of the
 69387+	  symlink is the owner of the directory. Users will also not be
69388+ able to hardlink to files they do not own. If the sysctl option is
69389+ enabled, a sysctl option with name "linking_restrictions" is created.
69390+
69391+config GRKERNSEC_SYMLINKOWN
69392+ bool "Kernel-enforced SymlinksIfOwnerMatch"
69393+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69394+ help
69395+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
69396+ that prevents it from being used as a security feature. As Apache
69397+ verifies the symlink by performing a stat() against the target of
69398+ the symlink before it is followed, an attacker can setup a symlink
69399+ to point to a same-owned file, then replace the symlink with one
69400+ that targets another user's file just after Apache "validates" the
69401+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
69402+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
69403+ will be in place for the group you specify. If the sysctl option
69404+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
69405+ created.
69406+
69407+config GRKERNSEC_SYMLINKOWN_GID
69408+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
69409+ depends on GRKERNSEC_SYMLINKOWN
69410+ default 1006
69411+ help
69412+ Setting this GID determines what group kernel-enforced
69413+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
69414+ is enabled, a sysctl option with name "symlinkown_gid" is created.
69415+
69416+config GRKERNSEC_FIFO
69417+ bool "FIFO restrictions"
69418+ default y if GRKERNSEC_CONFIG_AUTO
69419+ help
69420+ If you say Y here, users will not be able to write to FIFOs they don't
69421+ own in world-writable +t directories (e.g. /tmp), unless the owner of
69422+ the FIFO is the same owner of the directory it's held in. If the sysctl
69423+ option is enabled, a sysctl option with name "fifo_restrictions" is
69424+ created.
69425+
69426+config GRKERNSEC_SYSFS_RESTRICT
69427+ bool "Sysfs/debugfs restriction"
69428+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
69429+ depends on SYSFS
69430+ help
69431+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
69432+ any filesystem normally mounted under it (e.g. debugfs) will be
69433+ mostly accessible only by root. These filesystems generally provide access
69434+ to hardware and debug information that isn't appropriate for unprivileged
69435+ users of the system. Sysfs and debugfs have also become a large source
69436+ of new vulnerabilities, ranging from infoleaks to local compromise.
69437+ There has been very little oversight with an eye toward security involved
69438+ in adding new exporters of information to these filesystems, so their
69439+ use is discouraged.
69440+ For reasons of compatibility, a few directories have been whitelisted
69441+ for access by non-root users:
69442+ /sys/fs/selinux
69443+ /sys/fs/fuse
69444+ /sys/devices/system/cpu
69445+
69446+config GRKERNSEC_ROFS
69447+ bool "Runtime read-only mount protection"
69448+ depends on SYSCTL
69449+ help
69450+ If you say Y here, a sysctl option with name "romount_protect" will
69451+ be created. By setting this option to 1 at runtime, filesystems
69452+ will be protected in the following ways:
69453+ * No new writable mounts will be allowed
69454+ * Existing read-only mounts won't be able to be remounted read/write
69455+ * Write operations will be denied on all block devices
69456+ This option acts independently of grsec_lock: once it is set to 1,
69457+ it cannot be turned off. Therefore, please be mindful of the resulting
69458+ behavior if this option is enabled in an init script on a read-only
69459+ filesystem.
69460+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
69461+ and GRKERNSEC_IO should be enabled and module loading disabled via
69462+ config or at runtime.
69463+ This feature is mainly intended for secure embedded systems.
69464+
69465+
69466+config GRKERNSEC_DEVICE_SIDECHANNEL
69467+ bool "Eliminate stat/notify-based device sidechannels"
69468+ default y if GRKERNSEC_CONFIG_AUTO
69469+ help
69470+ If you say Y here, timing analyses on block or character
69471+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
69472+ will be thwarted for unprivileged users. If a process without
69473+ CAP_MKNOD stats such a device, the last access and last modify times
69474+ will match the device's create time. No access or modify events
69475+ will be triggered through inotify/dnotify/fanotify for such devices.
69476+ This feature will prevent attacks that may at a minimum
69477+ allow an attacker to determine the administrator's password length.
69478+
69479+config GRKERNSEC_CHROOT
69480+ bool "Chroot jail restrictions"
69481+ default y if GRKERNSEC_CONFIG_AUTO
69482+ help
69483+ If you say Y here, you will be able to choose several options that will
69484+ make breaking out of a chrooted jail much more difficult. If you
69485+ encounter no software incompatibilities with the following options, it
69486+ is recommended that you enable each one.
69487+
69488+ Note that the chroot restrictions are not intended to apply to "chroots"
69489+ to directories that are simple bind mounts of the global root filesystem.
69490+ For several other reasons, a user shouldn't expect any significant
69491+ security by performing such a chroot.
69492+
69493+config GRKERNSEC_CHROOT_MOUNT
69494+ bool "Deny mounts"
69495+ default y if GRKERNSEC_CONFIG_AUTO
69496+ depends on GRKERNSEC_CHROOT
69497+ help
69498+ If you say Y here, processes inside a chroot will not be able to
69499+ mount or remount filesystems. If the sysctl option is enabled, a
69500+ sysctl option with name "chroot_deny_mount" is created.
69501+
69502+config GRKERNSEC_CHROOT_DOUBLE
69503+ bool "Deny double-chroots"
69504+ default y if GRKERNSEC_CONFIG_AUTO
69505+ depends on GRKERNSEC_CHROOT
69506+ help
69507+ If you say Y here, processes inside a chroot will not be able to chroot
69508+ again outside the chroot. This is a widely used method of breaking
69509+ out of a chroot jail and should not be allowed. If the sysctl
69510+ option is enabled, a sysctl option with name
69511+ "chroot_deny_chroot" is created.
69512+
69513+config GRKERNSEC_CHROOT_PIVOT
69514+ bool "Deny pivot_root in chroot"
69515+ default y if GRKERNSEC_CONFIG_AUTO
69516+ depends on GRKERNSEC_CHROOT
69517+ help
69518+ If you say Y here, processes inside a chroot will not be able to use
69519+ a function called pivot_root() that was introduced in Linux 2.3.41. It
69520+ works similar to chroot in that it changes the root filesystem. This
69521+ function could be misused in a chrooted process to attempt to break out
69522+ of the chroot, and therefore should not be allowed. If the sysctl
69523+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
69524+ created.
69525+
69526+config GRKERNSEC_CHROOT_CHDIR
69527+ bool "Enforce chdir(\"/\") on all chroots"
69528+ default y if GRKERNSEC_CONFIG_AUTO
69529+ depends on GRKERNSEC_CHROOT
69530+ help
69531+ If you say Y here, the current working directory of all newly-chrooted
 69532+	  applications will be set to the root directory of the chroot.
69533+ The man page on chroot(2) states:
69534+ Note that this call does not change the current working
69535+ directory, so that `.' can be outside the tree rooted at
69536+ `/'. In particular, the super-user can escape from a
69537+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
69538+
69539+ It is recommended that you say Y here, since it's not known to break
69540+ any software. If the sysctl option is enabled, a sysctl option with
69541+ name "chroot_enforce_chdir" is created.
69542+
69543+config GRKERNSEC_CHROOT_CHMOD
69544+ bool "Deny (f)chmod +s"
69545+ default y if GRKERNSEC_CONFIG_AUTO
69546+ depends on GRKERNSEC_CHROOT
69547+ help
69548+ If you say Y here, processes inside a chroot will not be able to chmod
69549+ or fchmod files to make them have suid or sgid bits. This protects
69550+ against another published method of breaking a chroot. If the sysctl
69551+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
69552+ created.
69553+
69554+config GRKERNSEC_CHROOT_FCHDIR
69555+ bool "Deny fchdir and fhandle out of chroot"
69556+ default y if GRKERNSEC_CONFIG_AUTO
69557+ depends on GRKERNSEC_CHROOT
69558+ help
69559+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
69560+ to a file descriptor of the chrooting process that points to a directory
69561+ outside the filesystem will be stopped. Additionally, this option prevents
69562+ use of the recently-created syscall for opening files by a guessable "file
69563+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
69564+ with name "chroot_deny_fchdir" is created.
69565+
69566+config GRKERNSEC_CHROOT_MKNOD
69567+ bool "Deny mknod"
69568+ default y if GRKERNSEC_CONFIG_AUTO
69569+ depends on GRKERNSEC_CHROOT
69570+ help
69571+ If you say Y here, processes inside a chroot will not be allowed to
69572+ mknod. The problem with using mknod inside a chroot is that it
69573+ would allow an attacker to create a device entry that is the same
69574+ as one on the physical root of your system, which could range from
69575+ anything from the console device to a device for your harddrive (which
69576+ they could then use to wipe the drive or steal data). It is recommended
69577+ that you say Y here, unless you run into software incompatibilities.
69578+ If the sysctl option is enabled, a sysctl option with name
69579+ "chroot_deny_mknod" is created.
69580+
69581+config GRKERNSEC_CHROOT_SHMAT
69582+ bool "Deny shmat() out of chroot"
69583+ default y if GRKERNSEC_CONFIG_AUTO
69584+ depends on GRKERNSEC_CHROOT
69585+ help
69586+ If you say Y here, processes inside a chroot will not be able to attach
69587+ to shared memory segments that were created outside of the chroot jail.
69588+ It is recommended that you say Y here. If the sysctl option is enabled,
69589+ a sysctl option with name "chroot_deny_shmat" is created.
69590+
69591+config GRKERNSEC_CHROOT_UNIX
69592+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
69593+ default y if GRKERNSEC_CONFIG_AUTO
69594+ depends on GRKERNSEC_CHROOT
69595+ help
69596+ If you say Y here, processes inside a chroot will not be able to
69597+ connect to abstract (meaning not belonging to a filesystem) Unix
69598+ domain sockets that were bound outside of a chroot. It is recommended
69599+ that you say Y here. If the sysctl option is enabled, a sysctl option
69600+ with name "chroot_deny_unix" is created.
69601+
69602+config GRKERNSEC_CHROOT_FINDTASK
69603+ bool "Protect outside processes"
69604+ default y if GRKERNSEC_CONFIG_AUTO
69605+ depends on GRKERNSEC_CHROOT
69606+ help
69607+ If you say Y here, processes inside a chroot will not be able to
69608+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
69609+ getsid, or view any process outside of the chroot. If the sysctl
69610+ option is enabled, a sysctl option with name "chroot_findtask" is
69611+ created.
69612+
69613+config GRKERNSEC_CHROOT_NICE
69614+ bool "Restrict priority changes"
69615+ default y if GRKERNSEC_CONFIG_AUTO
69616+ depends on GRKERNSEC_CHROOT
69617+ help
69618+ If you say Y here, processes inside a chroot will not be able to raise
69619+ the priority of processes in the chroot, or alter the priority of
69620+ processes outside the chroot. This provides more security than simply
69621+ removing CAP_SYS_NICE from the process' capability set. If the
69622+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
69623+ is created.
69624+
69625+config GRKERNSEC_CHROOT_SYSCTL
69626+ bool "Deny sysctl writes"
69627+ default y if GRKERNSEC_CONFIG_AUTO
69628+ depends on GRKERNSEC_CHROOT
69629+ help
69630+ If you say Y here, an attacker in a chroot will not be able to
69631+ write to sysctl entries, either by sysctl(2) or through a /proc
69632+ interface. It is strongly recommended that you say Y here. If the
69633+ sysctl option is enabled, a sysctl option with name
69634+ "chroot_deny_sysctl" is created.
69635+
69636+config GRKERNSEC_CHROOT_CAPS
69637+ bool "Capability restrictions"
69638+ default y if GRKERNSEC_CONFIG_AUTO
69639+ depends on GRKERNSEC_CHROOT
69640+ help
69641+ If you say Y here, the capabilities on all processes within a
69642+ chroot jail will be lowered to stop module insertion, raw i/o,
69643+ system and net admin tasks, rebooting the system, modifying immutable
69644+ files, modifying IPC owned by another, and changing the system time.
69645+ This is left an option because it can break some apps. Disable this
69646+ if your chrooted apps are having problems performing those kinds of
69647+ tasks. If the sysctl option is enabled, a sysctl option with
69648+ name "chroot_caps" is created.
69649+
69650+config GRKERNSEC_CHROOT_INITRD
69651+ bool "Exempt initrd tasks from restrictions"
69652+ default y if GRKERNSEC_CONFIG_AUTO
69653+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
69654+ help
69655+ If you say Y here, tasks started prior to init will be exempted from
69656+ grsecurity's chroot restrictions. This option is mainly meant to
69657+ resolve Plymouth's performing privileged operations unnecessarily
69658+ in a chroot.
69659+
69660+endmenu
69661+menu "Kernel Auditing"
69662+depends on GRKERNSEC
69663+
69664+config GRKERNSEC_AUDIT_GROUP
69665+ bool "Single group for auditing"
69666+ help
69667+ If you say Y here, the exec and chdir logging features will only operate
69668+ on a group you specify. This option is recommended if you only want to
69669+ watch certain users instead of having a large amount of logs from the
69670+ entire system. If the sysctl option is enabled, a sysctl option with
69671+ name "audit_group" is created.
69672+
69673+config GRKERNSEC_AUDIT_GID
69674+ int "GID for auditing"
69675+ depends on GRKERNSEC_AUDIT_GROUP
69676+ default 1007
69677+
69678+config GRKERNSEC_EXECLOG
69679+ bool "Exec logging"
69680+ help
69681+ If you say Y here, all execve() calls will be logged (since the
69682+ other exec*() calls are frontends to execve(), all execution
69683+ will be logged). Useful for shell-servers that like to keep track
69684+ of their users. If the sysctl option is enabled, a sysctl option with
69685+ name "exec_logging" is created.
69686+ WARNING: This option when enabled will produce a LOT of logs, especially
69687+ on an active system.
69688+
69689+config GRKERNSEC_RESLOG
69690+ bool "Resource logging"
69691+ default y if GRKERNSEC_CONFIG_AUTO
69692+ help
69693+ If you say Y here, all attempts to overstep resource limits will
69694+ be logged with the resource name, the requested size, and the current
69695+ limit. It is highly recommended that you say Y here. If the sysctl
69696+ option is enabled, a sysctl option with name "resource_logging" is
69697+ created. If the RBAC system is enabled, the sysctl value is ignored.
69698+
69699+config GRKERNSEC_CHROOT_EXECLOG
69700+ bool "Log execs within chroot"
69701+ help
69702+ If you say Y here, all executions inside a chroot jail will be logged
69703+ to syslog. This can cause a large amount of logs if certain
69704+ applications (eg. djb's daemontools) are installed on the system, and
69705+ is therefore left as an option. If the sysctl option is enabled, a
69706+ sysctl option with name "chroot_execlog" is created.
69707+
69708+config GRKERNSEC_AUDIT_PTRACE
69709+ bool "Ptrace logging"
69710+ help
69711+ If you say Y here, all attempts to attach to a process via ptrace
69712+ will be logged. If the sysctl option is enabled, a sysctl option
69713+ with name "audit_ptrace" is created.
69714+
69715+config GRKERNSEC_AUDIT_CHDIR
69716+ bool "Chdir logging"
69717+ help
69718+ If you say Y here, all chdir() calls will be logged. If the sysctl
69719+ option is enabled, a sysctl option with name "audit_chdir" is created.
69720+
69721+config GRKERNSEC_AUDIT_MOUNT
69722+ bool "(Un)Mount logging"
69723+ help
69724+ If you say Y here, all mounts and unmounts will be logged. If the
69725+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69726+ created.
69727+
69728+config GRKERNSEC_SIGNAL
69729+ bool "Signal logging"
69730+ default y if GRKERNSEC_CONFIG_AUTO
69731+ help
69732+ If you say Y here, certain important signals will be logged, such as
 69733+	  SIGSEGV, which will as a result inform you of when an error in a program
69734+ occurred, which in some cases could mean a possible exploit attempt.
69735+ If the sysctl option is enabled, a sysctl option with name
69736+ "signal_logging" is created.
69737+
69738+config GRKERNSEC_FORKFAIL
69739+ bool "Fork failure logging"
69740+ help
69741+ If you say Y here, all failed fork() attempts will be logged.
69742+ This could suggest a fork bomb, or someone attempting to overstep
69743+ their process limit. If the sysctl option is enabled, a sysctl option
69744+ with name "forkfail_logging" is created.
69745+
69746+config GRKERNSEC_TIME
69747+ bool "Time change logging"
69748+ default y if GRKERNSEC_CONFIG_AUTO
69749+ help
69750+ If you say Y here, any changes of the system clock will be logged.
69751+ If the sysctl option is enabled, a sysctl option with name
69752+ "timechange_logging" is created.
69753+
69754+config GRKERNSEC_PROC_IPADDR
69755+ bool "/proc/<pid>/ipaddr support"
69756+ default y if GRKERNSEC_CONFIG_AUTO
69757+ help
69758+ If you say Y here, a new entry will be added to each /proc/<pid>
69759+ directory that contains the IP address of the person using the task.
69760+ The IP is carried across local TCP and AF_UNIX stream sockets.
69761+ This information can be useful for IDS/IPSes to perform remote response
69762+ to a local attack. The entry is readable by only the owner of the
69763+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69764+ the RBAC system), and thus does not create privacy concerns.
69765+
69766+config GRKERNSEC_RWXMAP_LOG
69767+ bool 'Denied RWX mmap/mprotect logging'
69768+ default y if GRKERNSEC_CONFIG_AUTO
69769+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69770+ help
69771+ If you say Y here, calls to mmap() and mprotect() with explicit
69772+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69773+ denied by the PAX_MPROTECT feature. This feature will also
69774+ log other problematic scenarios that can occur when PAX_MPROTECT
69775+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69776+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69777+ is created.
69778+
69779+endmenu
69780+
69781+menu "Executable Protections"
69782+depends on GRKERNSEC
69783+
69784+config GRKERNSEC_DMESG
69785+ bool "Dmesg(8) restriction"
69786+ default y if GRKERNSEC_CONFIG_AUTO
69787+ help
69788+ If you say Y here, non-root users will not be able to use dmesg(8)
69789+ to view the contents of the kernel's circular log buffer.
69790+ The kernel's log buffer often contains kernel addresses and other
69791+ identifying information useful to an attacker in fingerprinting a
69792+ system for a targeted exploit.
69793+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69794+ created.
69795+
69796+config GRKERNSEC_HARDEN_PTRACE
69797+ bool "Deter ptrace-based process snooping"
69798+ default y if GRKERNSEC_CONFIG_AUTO
69799+ help
69800+ If you say Y here, TTY sniffers and other malicious monitoring
69801+ programs implemented through ptrace will be defeated. If you
69802+ have been using the RBAC system, this option has already been
69803+ enabled for several years for all users, with the ability to make
69804+ fine-grained exceptions.
69805+
69806+ This option only affects the ability of non-root users to ptrace
69807+ processes that are not a descendent of the ptracing process.
69808+ This means that strace ./binary and gdb ./binary will still work,
69809+ but attaching to arbitrary processes will not. If the sysctl
69810+ option is enabled, a sysctl option with name "harden_ptrace" is
69811+ created.
69812+
69813+config GRKERNSEC_PTRACE_READEXEC
69814+ bool "Require read access to ptrace sensitive binaries"
69815+ default y if GRKERNSEC_CONFIG_AUTO
69816+ help
69817+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69818+ binaries. This option is useful in environments that
69819+ remove the read bits (e.g. file mode 4711) from suid binaries to
69820+ prevent infoleaking of their contents. This option adds
69821+ consistency to the use of that file mode, as the binary could normally
69822+ be read out when run without privileges while ptracing.
69823+
69824+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69825+ is created.
69826+
69827+config GRKERNSEC_SETXID
69828+ bool "Enforce consistent multithreaded privileges"
69829+ default y if GRKERNSEC_CONFIG_AUTO
69830+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69831+ help
69832+ If you say Y here, a change from a root uid to a non-root uid
69833+ in a multithreaded application will cause the resulting uids,
69834+ gids, supplementary groups, and capabilities in that thread
69835+ to be propagated to the other threads of the process. In most
69836+ cases this is unnecessary, as glibc will emulate this behavior
69837+ on behalf of the application. Other libcs do not act in the
69838+ same way, allowing the other threads of the process to continue
69839+ running with root privileges. If the sysctl option is enabled,
69840+ a sysctl option with name "consistent_setxid" is created.
69841+
69842+config GRKERNSEC_HARDEN_IPC
69843+ bool "Disallow access to overly-permissive IPC objects"
69844+ default y if GRKERNSEC_CONFIG_AUTO
69845+ depends on SYSVIPC
69846+ help
69847+ If you say Y here, access to overly-permissive IPC objects (shared
69848+ memory, message queues, and semaphores) will be denied for processes
69849+ given the following criteria beyond normal permission checks:
69850+ 1) If the IPC object is world-accessible and the euid doesn't match
69851+ that of the creator or current uid for the IPC object
69852+ 2) If the IPC object is group-accessible and the egid doesn't
69853+ match that of the creator or current gid for the IPC object
69854+ It's a common error to grant too much permission to these objects,
69855+ with impact ranging from denial of service and information leaking to
69856+ privilege escalation. This feature was developed in response to
69857+ research by Tim Brown:
69858+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69859+ who found hundreds of such insecure usages. Processes with
69860+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69861+ If the sysctl option is enabled, a sysctl option with name
69862+ "harden_ipc" is created.
69863+
69864+config GRKERNSEC_TPE
69865+ bool "Trusted Path Execution (TPE)"
69866+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69867+ help
69868+ If you say Y here, you will be able to choose a gid to add to the
69869+ supplementary groups of users you want to mark as "untrusted."
69870+ These users will not be able to execute any files that are not in
69871+ root-owned directories writable only by root. If the sysctl option
69872+ is enabled, a sysctl option with name "tpe" is created.
69873+
69874+config GRKERNSEC_TPE_ALL
69875+ bool "Partially restrict all non-root users"
69876+ depends on GRKERNSEC_TPE
69877+ help
69878+ If you say Y here, all non-root users will be covered under
69879+ a weaker TPE restriction. This is separate from, and in addition to,
69880+ the main TPE options that you have selected elsewhere. Thus, if a
69881+ "trusted" GID is chosen, this restriction applies to even that GID.
69882+ Under this restriction, all non-root users will only be allowed to
69883+ execute files in directories they own that are not group or
69884+ world-writable, or in directories owned by root and writable only by
69885+ root. If the sysctl option is enabled, a sysctl option with name
69886+ "tpe_restrict_all" is created.
69887+
69888+config GRKERNSEC_TPE_INVERT
69889+ bool "Invert GID option"
69890+ depends on GRKERNSEC_TPE
69891+ help
69892+ If you say Y here, the group you specify in the TPE configuration will
69893+ decide what group TPE restrictions will be *disabled* for. This
69894+ option is useful if you want TPE restrictions to be applied to most
69895+ users on the system. If the sysctl option is enabled, a sysctl option
69896+ with name "tpe_invert" is created. Unlike other sysctl options, this
69897+ entry will default to on for backward-compatibility.
69898+
69899+config GRKERNSEC_TPE_GID
69900+ int
69901+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69902+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69903+
69904+config GRKERNSEC_TPE_UNTRUSTED_GID
69905+ int "GID for TPE-untrusted users"
69906+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69907+ default 1005
69908+ help
69909+ Setting this GID determines what group TPE restrictions will be
69910+ *enabled* for. If the sysctl option is enabled, a sysctl option
69911+ with name "tpe_gid" is created.
69912+
69913+config GRKERNSEC_TPE_TRUSTED_GID
69914+ int "GID for TPE-trusted users"
69915+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69916+ default 1005
69917+ help
69918+ Setting this GID determines what group TPE restrictions will be
69919+ *disabled* for. If the sysctl option is enabled, a sysctl option
69920+ with name "tpe_gid" is created.
69921+
69922+endmenu
69923+menu "Network Protections"
69924+depends on GRKERNSEC
69925+
69926+config GRKERNSEC_BLACKHOLE
69927+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69928+ default y if GRKERNSEC_CONFIG_AUTO
69929+ depends on NET
69930+ help
69931+ If you say Y here, neither TCP resets nor ICMP
69932+ destination-unreachable packets will be sent in response to packets
69933+ sent to ports for which no associated listening process exists.
69934+ It will also prevent the sending of ICMP protocol unreachable packets
69935+ in response to packets with unknown protocols.
69936+ This feature supports both IPV4 and IPV6 and exempts the
69937+ loopback interface from blackholing. Enabling this feature
69938+ makes a host more resilient to DoS attacks and reduces network
69939+ visibility against scanners.
69940+
69941+ The blackhole feature as-implemented is equivalent to the FreeBSD
69942+ blackhole feature, as it prevents RST responses to all packets, not
69943+ just SYNs. Under most application behavior this causes no
69944+ problems, but applications (like haproxy) may not close certain
69945+ connections in a way that cleanly terminates them on the remote
69946+ end, leaving the remote host in LAST_ACK state. Because of this
69947+ side-effect and to prevent intentional LAST_ACK DoSes, this
69948+ feature also adds automatic mitigation against such attacks.
69949+ The mitigation drastically reduces the amount of time a socket
69950+ can spend in LAST_ACK state. If you're using haproxy and not
69951+ all servers it connects to have this option enabled, consider
69952+ disabling this feature on the haproxy host.
69953+
69954+ If the sysctl option is enabled, two sysctl options with names
69955+ "ip_blackhole" and "lastack_retries" will be created.
69956+ While "ip_blackhole" takes the standard zero/non-zero on/off
69957+ toggle, "lastack_retries" uses the same kinds of values as
69958+ "tcp_retries1" and "tcp_retries2". The default value of 4
69959+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69960+ state.
69961+
69962+config GRKERNSEC_NO_SIMULT_CONNECT
69963+ bool "Disable TCP Simultaneous Connect"
69964+ default y if GRKERNSEC_CONFIG_AUTO
69965+ depends on NET
69966+ help
69967+ If you say Y here, a feature by Willy Tarreau will be enabled that
69968+ removes a weakness in Linux's strict implementation of TCP that
69969+ allows two clients to connect to each other without either entering
69970+ a listening state. The weakness allows an attacker to easily prevent
69971+ a client from connecting to a known server provided the source port
69972+ for the connection is guessed correctly.
69973+
69974+ As the weakness could be used to prevent an antivirus or IPS from
69975+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69976+ it should be eliminated by enabling this option. Though Linux is
69977+ one of few operating systems supporting simultaneous connect, it
69978+ has no legitimate use in practice and is rarely supported by firewalls.
69979+
69980+config GRKERNSEC_SOCKET
69981+ bool "Socket restrictions"
69982+ depends on NET
69983+ help
69984+ If you say Y here, you will be able to choose from several options.
69985+ If you assign a GID on your system and add it to the supplementary
69986+ groups of users you want to restrict socket access to, this patch
69987+ will perform up to three things, based on the option(s) you choose.
69988+
69989+config GRKERNSEC_SOCKET_ALL
69990+ bool "Deny any sockets to group"
69991+ depends on GRKERNSEC_SOCKET
69992+ help
69993+	  If you say Y here, you will be able to choose a GID whose users will
69994+ be unable to connect to other hosts from your machine or run server
69995+ applications from your machine. If the sysctl option is enabled, a
69996+ sysctl option with name "socket_all" is created.
69997+
69998+config GRKERNSEC_SOCKET_ALL_GID
69999+ int "GID to deny all sockets for"
70000+ depends on GRKERNSEC_SOCKET_ALL
70001+ default 1004
70002+ help
70003+ Here you can choose the GID to disable socket access for. Remember to
70004+ add the users you want socket access disabled for to the GID
70005+ specified here. If the sysctl option is enabled, a sysctl option
70006+ with name "socket_all_gid" is created.
70007+
70008+config GRKERNSEC_SOCKET_CLIENT
70009+ bool "Deny client sockets to group"
70010+ depends on GRKERNSEC_SOCKET
70011+ help
70012+	  If you say Y here, you will be able to choose a GID whose users will
70013+ be unable to connect to other hosts from your machine, but will be
70014+ able to run servers. If this option is enabled, all users in the group
70015+ you specify will have to use passive mode when initiating ftp transfers
70016+ from the shell on your machine. If the sysctl option is enabled, a
70017+ sysctl option with name "socket_client" is created.
70018+
70019+config GRKERNSEC_SOCKET_CLIENT_GID
70020+ int "GID to deny client sockets for"
70021+ depends on GRKERNSEC_SOCKET_CLIENT
70022+ default 1003
70023+ help
70024+ Here you can choose the GID to disable client socket access for.
70025+ Remember to add the users you want client socket access disabled for to
70026+ the GID specified here. If the sysctl option is enabled, a sysctl
70027+ option with name "socket_client_gid" is created.
70028+
70029+config GRKERNSEC_SOCKET_SERVER
70030+ bool "Deny server sockets to group"
70031+ depends on GRKERNSEC_SOCKET
70032+ help
70033+	  If you say Y here, you will be able to choose a GID whose users will
70034+ be unable to run server applications from your machine. If the sysctl
70035+ option is enabled, a sysctl option with name "socket_server" is created.
70036+
70037+config GRKERNSEC_SOCKET_SERVER_GID
70038+ int "GID to deny server sockets for"
70039+ depends on GRKERNSEC_SOCKET_SERVER
70040+ default 1002
70041+ help
70042+ Here you can choose the GID to disable server socket access for.
70043+ Remember to add the users you want server socket access disabled for to
70044+ the GID specified here. If the sysctl option is enabled, a sysctl
70045+ option with name "socket_server_gid" is created.
70046+
70047+endmenu
70048+
70049+menu "Physical Protections"
70050+depends on GRKERNSEC
70051+
70052+config GRKERNSEC_DENYUSB
70053+ bool "Deny new USB connections after toggle"
70054+ default y if GRKERNSEC_CONFIG_AUTO
70055+ depends on SYSCTL && USB_SUPPORT
70056+ help
70057+ If you say Y here, a new sysctl option with name "deny_new_usb"
70058+ will be created. Setting its value to 1 will prevent any new
70059+ USB devices from being recognized by the OS. Any attempted USB
70060+ device insertion will be logged. This option is intended to be
70061+ used against custom USB devices designed to exploit vulnerabilities
70062+ in various USB device drivers.
70063+
70064+ For greatest effectiveness, this sysctl should be set after any
70065+ relevant init scripts. This option is safe to enable in distros
70066+ as each user can choose whether or not to toggle the sysctl.
70067+
70068+config GRKERNSEC_DENYUSB_FORCE
70069+ bool "Reject all USB devices not connected at boot"
70070+ select USB
70071+ depends on GRKERNSEC_DENYUSB
70072+ help
70073+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
70074+ that doesn't involve a sysctl entry. This option should only be
70075+ enabled if you're sure you want to deny all new USB connections
70076+ at runtime and don't want to modify init scripts. This should not
70077+ be enabled by distros. It forces the core USB code to be built
70078+ into the kernel image so that all devices connected at boot time
70079+ can be recognized and new USB device connections can be prevented
70080+ prior to init running.
70081+
70082+endmenu
70083+
70084+menu "Sysctl Support"
70085+depends on GRKERNSEC && SYSCTL
70086+
70087+config GRKERNSEC_SYSCTL
70088+ bool "Sysctl support"
70089+ default y if GRKERNSEC_CONFIG_AUTO
70090+ help
70091+ If you say Y here, you will be able to change the options that
70092+ grsecurity runs with at bootup, without having to recompile your
70093+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
70094+ to enable (1) or disable (0) various features. All the sysctl entries
70095+ are mutable until the "grsec_lock" entry is set to a non-zero value.
70096+ All features enabled in the kernel configuration are disabled at boot
70097+ if you do not say Y to the "Turn on features by default" option.
70098+ All options should be set at startup, and the grsec_lock entry should
70099+ be set to a non-zero value after all the options are set.
70100+ *THIS IS EXTREMELY IMPORTANT*
70101+
70102+config GRKERNSEC_SYSCTL_DISTRO
70103+ bool "Extra sysctl support for distro makers (READ HELP)"
70104+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
70105+ help
70106+ If you say Y here, additional sysctl options will be created
70107+ for features that affect processes running as root. Therefore,
70108+ it is critical when using this option that the grsec_lock entry be
70109+ enabled after boot. Only distros with prebuilt kernel packages
70110+ with this option enabled that can ensure grsec_lock is enabled
70111+ after boot should use this option.
70112+ *Failure to set grsec_lock after boot makes all grsec features
70113+ this option covers useless*
70114+
70115+ Currently this option creates the following sysctl entries:
70116+ "Disable Privileged I/O": "disable_priv_io"
70117+
70118+config GRKERNSEC_SYSCTL_ON
70119+ bool "Turn on features by default"
70120+ default y if GRKERNSEC_CONFIG_AUTO
70121+ depends on GRKERNSEC_SYSCTL
70122+ help
70123+ If you say Y here, instead of having all features enabled in the
70124+ kernel configuration disabled at boot time, the features will be
70125+ enabled at boot time. It is recommended you say Y here unless
70126+ there is some reason you would want all sysctl-tunable features to
70127+ be disabled by default. As mentioned elsewhere, it is important
70128+ to enable the grsec_lock entry once you have finished modifying
70129+ the sysctl entries.
70130+
70131+endmenu
70132+menu "Logging Options"
70133+depends on GRKERNSEC
70134+
70135+config GRKERNSEC_FLOODTIME
70136+ int "Seconds in between log messages (minimum)"
70137+ default 10
70138+ help
70139+ This option allows you to enforce the number of seconds between
70140+ grsecurity log messages. The default should be suitable for most
70141+ people, however, if you choose to change it, choose a value small enough
70142+ to allow informative logs to be produced, but large enough to
70143+ prevent flooding.
70144+
70145+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
70146+ any rate limiting on grsecurity log messages.
70147+
70148+config GRKERNSEC_FLOODBURST
70149+ int "Number of messages in a burst (maximum)"
70150+ default 6
70151+ help
70152+ This option allows you to choose the maximum number of messages allowed
70153+ within the flood time interval you chose in a separate option. The
70154+ default should be suitable for most people, however if you find that
70155+ many of your logs are being interpreted as flooding, you may want to
70156+ raise this value.
70157+
70158+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
70159+ any rate limiting on grsecurity log messages.
70160+
70161+endmenu
70162diff --git a/grsecurity/Makefile b/grsecurity/Makefile
70163new file mode 100644
70164index 0000000..30ababb
70165--- /dev/null
70166+++ b/grsecurity/Makefile
70167@@ -0,0 +1,54 @@
70168+# grsecurity – access control and security hardening for Linux
70169+# All code in this directory and various hooks located throughout the Linux kernel are
70170+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
70171+# http://www.grsecurity.net spender@grsecurity.net
70172+#
70173+# This program is free software; you can redistribute it and/or
70174+# modify it under the terms of the GNU General Public License version 2
70175+# as published by the Free Software Foundation.
70176+#
70177+# This program is distributed in the hope that it will be useful,
70178+# but WITHOUT ANY WARRANTY; without even the implied warranty of
70179+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70180+# GNU General Public License for more details.
70181+#
70182+# You should have received a copy of the GNU General Public License
70183+# along with this program; if not, write to the Free Software
70184+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
70185+
70186+KBUILD_CFLAGS += -Werror
70187+
70188+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
70189+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
70190+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
70191+ grsec_usb.o grsec_ipc.o grsec_proc.o
70192+
70193+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
70194+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
70195+ gracl_learn.o grsec_log.o gracl_policy.o
70196+ifdef CONFIG_COMPAT
70197+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
70198+endif
70199+
70200+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
70201+
70202+ifdef CONFIG_NET
70203+obj-y += grsec_sock.o
70204+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
70205+endif
70206+
70207+ifndef CONFIG_GRKERNSEC
70208+obj-y += grsec_disabled.o
70209+endif
70210+
70211+ifdef CONFIG_GRKERNSEC_HIDESYM
70212+extra-y := grsec_hidesym.o
70213+$(obj)/grsec_hidesym.o:
70214+ @-chmod -f 500 /boot
70215+ @-chmod -f 500 /lib/modules
70216+ @-chmod -f 500 /lib64/modules
70217+ @-chmod -f 500 /lib32/modules
70218+ @-chmod -f 700 .
70219+ @-chmod -f 700 $(objtree)
70220+ @echo ' grsec: protected kernel image paths'
70221+endif
70222diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
70223new file mode 100644
70224index 0000000..6ae3aa0
70225--- /dev/null
70226+++ b/grsecurity/gracl.c
70227@@ -0,0 +1,2703 @@
70228+#include <linux/kernel.h>
70229+#include <linux/module.h>
70230+#include <linux/sched.h>
70231+#include <linux/mm.h>
70232+#include <linux/file.h>
70233+#include <linux/fs.h>
70234+#include <linux/namei.h>
70235+#include <linux/mount.h>
70236+#include <linux/tty.h>
70237+#include <linux/proc_fs.h>
70238+#include <linux/lglock.h>
70239+#include <linux/slab.h>
70240+#include <linux/vmalloc.h>
70241+#include <linux/types.h>
70242+#include <linux/sysctl.h>
70243+#include <linux/netdevice.h>
70244+#include <linux/ptrace.h>
70245+#include <linux/gracl.h>
70246+#include <linux/gralloc.h>
70247+#include <linux/security.h>
70248+#include <linux/grinternal.h>
70249+#include <linux/pid_namespace.h>
70250+#include <linux/stop_machine.h>
70251+#include <linux/fdtable.h>
70252+#include <linux/percpu.h>
70253+#include <linux/lglock.h>
70254+#include <linux/hugetlb.h>
70255+#include <linux/posix-timers.h>
70256+#include <linux/prefetch.h>
70257+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70258+#include <linux/magic.h>
70259+#include <linux/pagemap.h>
70260+#include "../fs/btrfs/async-thread.h"
70261+#include "../fs/btrfs/ctree.h"
70262+#include "../fs/btrfs/btrfs_inode.h"
70263+#endif
70264+#include "../fs/mount.h"
70265+
70266+#include <asm/uaccess.h>
70267+#include <asm/errno.h>
70268+#include <asm/mman.h>
70269+
70270+#define FOR_EACH_ROLE_START(role) \
70271+ role = running_polstate.role_list; \
70272+ while (role) {
70273+
70274+#define FOR_EACH_ROLE_END(role) \
70275+ role = role->prev; \
70276+ }
70277+
70278+extern struct path gr_real_root;
70279+
70280+static struct gr_policy_state running_polstate;
70281+struct gr_policy_state *polstate = &running_polstate;
70282+extern struct gr_alloc_state *current_alloc_state;
70283+
70284+extern char *gr_shared_page[4];
70285+DEFINE_RWLOCK(gr_inode_lock);
70286+
70287+static unsigned int gr_status __read_only = GR_STATUS_INIT;
70288+
70289+#ifdef CONFIG_NET
70290+extern struct vfsmount *sock_mnt;
70291+#endif
70292+
70293+extern struct vfsmount *pipe_mnt;
70294+extern struct vfsmount *shm_mnt;
70295+
70296+#ifdef CONFIG_HUGETLBFS
70297+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
70298+#endif
70299+
70300+extern u16 acl_sp_role_value;
70301+extern struct acl_object_label *fakefs_obj_rw;
70302+extern struct acl_object_label *fakefs_obj_rwx;
70303+
70304+int gr_acl_is_enabled(void)
70305+{
70306+ return (gr_status & GR_READY);
70307+}
70308+
70309+void gr_enable_rbac_system(void)
70310+{
70311+ pax_open_kernel();
70312+ gr_status |= GR_READY;
70313+ pax_close_kernel();
70314+}
70315+
70316+int gr_rbac_disable(void *unused)
70317+{
70318+ pax_open_kernel();
70319+ gr_status &= ~GR_READY;
70320+ pax_close_kernel();
70321+
70322+ return 0;
70323+}
70324+
70325+static inline dev_t __get_dev(const struct dentry *dentry)
70326+{
70327+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
70328+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
70329+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
70330+ else
70331+#endif
70332+ return dentry->d_sb->s_dev;
70333+}
70334+
70335+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70336+{
70337+ return __get_dev(dentry);
70338+}
70339+
70340+static char gr_task_roletype_to_char(struct task_struct *task)
70341+{
70342+ switch (task->role->roletype &
70343+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
70344+ GR_ROLE_SPECIAL)) {
70345+ case GR_ROLE_DEFAULT:
70346+ return 'D';
70347+ case GR_ROLE_USER:
70348+ return 'U';
70349+ case GR_ROLE_GROUP:
70350+ return 'G';
70351+ case GR_ROLE_SPECIAL:
70352+ return 'S';
70353+ }
70354+
70355+ return 'X';
70356+}
70357+
70358+char gr_roletype_to_char(void)
70359+{
70360+ return gr_task_roletype_to_char(current);
70361+}
70362+
70363+__inline__ int
70364+gr_acl_tpe_check(void)
70365+{
70366+ if (unlikely(!(gr_status & GR_READY)))
70367+ return 0;
70368+ if (current->role->roletype & GR_ROLE_TPE)
70369+ return 1;
70370+ else
70371+ return 0;
70372+}
70373+
70374+int
70375+gr_handle_rawio(const struct inode *inode)
70376+{
70377+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70378+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
70379+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
70380+ !capable(CAP_SYS_RAWIO))
70381+ return 1;
70382+#endif
70383+ return 0;
70384+}
70385+
70386+int
70387+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
70388+{
70389+ if (likely(lena != lenb))
70390+ return 0;
70391+
70392+ return !memcmp(a, b, lena);
70393+}
70394+
70395+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
70396+{
70397+ *buflen -= namelen;
70398+ if (*buflen < 0)
70399+ return -ENAMETOOLONG;
70400+ *buffer -= namelen;
70401+ memcpy(*buffer, str, namelen);
70402+ return 0;
70403+}
70404+
70405+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
70406+{
70407+ return prepend(buffer, buflen, name->name, name->len);
70408+}
70409+
70410+static int prepend_path(const struct path *path, struct path *root,
70411+ char **buffer, int *buflen)
70412+{
70413+ struct dentry *dentry = path->dentry;
70414+ struct vfsmount *vfsmnt = path->mnt;
70415+ struct mount *mnt = real_mount(vfsmnt);
70416+ bool slash = false;
70417+ int error = 0;
70418+
70419+ while (dentry != root->dentry || vfsmnt != root->mnt) {
70420+ struct dentry * parent;
70421+
70422+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
70423+ /* Global root? */
70424+ if (!mnt_has_parent(mnt)) {
70425+ goto out;
70426+ }
70427+ dentry = mnt->mnt_mountpoint;
70428+ mnt = mnt->mnt_parent;
70429+ vfsmnt = &mnt->mnt;
70430+ continue;
70431+ }
70432+ parent = dentry->d_parent;
70433+ prefetch(parent);
70434+ spin_lock(&dentry->d_lock);
70435+ error = prepend_name(buffer, buflen, &dentry->d_name);
70436+ spin_unlock(&dentry->d_lock);
70437+ if (!error)
70438+ error = prepend(buffer, buflen, "/", 1);
70439+ if (error)
70440+ break;
70441+
70442+ slash = true;
70443+ dentry = parent;
70444+ }
70445+
70446+out:
70447+ if (!error && !slash)
70448+ error = prepend(buffer, buflen, "/", 1);
70449+
70450+ return error;
70451+}
70452+
70453+/* this must be called with mount_lock and rename_lock held */
70454+
70455+static char *__our_d_path(const struct path *path, struct path *root,
70456+ char *buf, int buflen)
70457+{
70458+ char *res = buf + buflen;
70459+ int error;
70460+
70461+ prepend(&res, &buflen, "\0", 1);
70462+ error = prepend_path(path, root, &res, &buflen);
70463+ if (error)
70464+ return ERR_PTR(error);
70465+
70466+ return res;
70467+}
70468+
70469+static char *
70470+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
70471+{
70472+ char *retval;
70473+
70474+ retval = __our_d_path(path, root, buf, buflen);
70475+ if (unlikely(IS_ERR(retval)))
70476+ retval = strcpy(buf, "<path too long>");
70477+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
70478+ retval[1] = '\0';
70479+
70480+ return retval;
70481+}
70482+
70483+static char *
70484+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70485+ char *buf, int buflen)
70486+{
70487+ struct path path;
70488+ char *res;
70489+
70490+ path.dentry = (struct dentry *)dentry;
70491+ path.mnt = (struct vfsmount *)vfsmnt;
70492+
70493+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
70494+ by the RBAC system */
70495+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
70496+
70497+ return res;
70498+}
70499+
70500+static char *
70501+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
70502+ char *buf, int buflen)
70503+{
70504+ char *res;
70505+ struct path path;
70506+ struct path root;
70507+ struct task_struct *reaper = init_pid_ns.child_reaper;
70508+
70509+ path.dentry = (struct dentry *)dentry;
70510+ path.mnt = (struct vfsmount *)vfsmnt;
70511+
70512+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
70513+ get_fs_root(reaper->fs, &root);
70514+
70515+ read_seqlock_excl(&mount_lock);
70516+ write_seqlock(&rename_lock);
70517+ res = gen_full_path(&path, &root, buf, buflen);
70518+ write_sequnlock(&rename_lock);
70519+ read_sequnlock_excl(&mount_lock);
70520+
70521+ path_put(&root);
70522+ return res;
70523+}
70524+
70525+char *
70526+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70527+{
70528+ char *ret;
70529+ read_seqlock_excl(&mount_lock);
70530+ write_seqlock(&rename_lock);
70531+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70532+ PAGE_SIZE);
70533+ write_sequnlock(&rename_lock);
70534+ read_sequnlock_excl(&mount_lock);
70535+ return ret;
70536+}
70537+
70538+static char *
70539+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
70540+{
70541+ char *ret;
70542+ char *buf;
70543+ int buflen;
70544+
70545+ read_seqlock_excl(&mount_lock);
70546+ write_seqlock(&rename_lock);
70547+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
70548+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
70549+ buflen = (int)(ret - buf);
70550+ if (buflen >= 5)
70551+ prepend(&ret, &buflen, "/proc", 5);
70552+ else
70553+ ret = strcpy(buf, "<path too long>");
70554+ write_sequnlock(&rename_lock);
70555+ read_sequnlock_excl(&mount_lock);
70556+ return ret;
70557+}
70558+
70559+char *
70560+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
70561+{
70562+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
70563+ PAGE_SIZE);
70564+}
70565+
70566+char *
70567+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
70568+{
70569+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
70570+ PAGE_SIZE);
70571+}
70572+
70573+char *
70574+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
70575+{
70576+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
70577+ PAGE_SIZE);
70578+}
70579+
70580+char *
70581+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
70582+{
70583+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
70584+ PAGE_SIZE);
70585+}
70586+
70587+char *
70588+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
70589+{
70590+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
70591+ PAGE_SIZE);
70592+}
70593+
70594+__inline__ __u32
70595+to_gr_audit(const __u32 reqmode)
70596+{
70597+ /* masks off auditable permission flags, then shifts them to create
70598+ auditing flags, and adds the special case of append auditing if
70599+ we're requesting write */
70600+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
70601+}
70602+
70603+struct acl_role_label *
70604+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
70605+ const gid_t gid)
70606+{
70607+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
70608+ struct acl_role_label *match;
70609+ struct role_allowed_ip *ipp;
70610+ unsigned int x;
70611+ u32 curr_ip = task->signal->saved_ip;
70612+
70613+ match = state->acl_role_set.r_hash[index];
70614+
70615+ while (match) {
70616+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
70617+ for (x = 0; x < match->domain_child_num; x++) {
70618+ if (match->domain_children[x] == uid)
70619+ goto found;
70620+ }
70621+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
70622+ break;
70623+ match = match->next;
70624+ }
70625+found:
70626+ if (match == NULL) {
70627+ try_group:
70628+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
70629+ match = state->acl_role_set.r_hash[index];
70630+
70631+ while (match) {
70632+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
70633+ for (x = 0; x < match->domain_child_num; x++) {
70634+ if (match->domain_children[x] == gid)
70635+ goto found2;
70636+ }
70637+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
70638+ break;
70639+ match = match->next;
70640+ }
70641+found2:
70642+ if (match == NULL)
70643+ match = state->default_role;
70644+ if (match->allowed_ips == NULL)
70645+ return match;
70646+ else {
70647+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70648+ if (likely
70649+ ((ntohl(curr_ip) & ipp->netmask) ==
70650+ (ntohl(ipp->addr) & ipp->netmask)))
70651+ return match;
70652+ }
70653+ match = state->default_role;
70654+ }
70655+ } else if (match->allowed_ips == NULL) {
70656+ return match;
70657+ } else {
70658+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
70659+ if (likely
70660+ ((ntohl(curr_ip) & ipp->netmask) ==
70661+ (ntohl(ipp->addr) & ipp->netmask)))
70662+ return match;
70663+ }
70664+ goto try_group;
70665+ }
70666+
70667+ return match;
70668+}
70669+
70670+static struct acl_role_label *
70671+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
70672+ const gid_t gid)
70673+{
70674+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
70675+}
70676+
70677+struct acl_subject_label *
70678+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
70679+ const struct acl_role_label *role)
70680+{
70681+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70682+ struct acl_subject_label *match;
70683+
70684+ match = role->subj_hash[index];
70685+
70686+ while (match && (match->inode != ino || match->device != dev ||
70687+ (match->mode & GR_DELETED))) {
70688+ match = match->next;
70689+ }
70690+
70691+ if (match && !(match->mode & GR_DELETED))
70692+ return match;
70693+ else
70694+ return NULL;
70695+}
70696+
70697+struct acl_subject_label *
70698+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
70699+ const struct acl_role_label *role)
70700+{
70701+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70702+ struct acl_subject_label *match;
70703+
70704+ match = role->subj_hash[index];
70705+
70706+ while (match && (match->inode != ino || match->device != dev ||
70707+ !(match->mode & GR_DELETED))) {
70708+ match = match->next;
70709+ }
70710+
70711+ if (match && (match->mode & GR_DELETED))
70712+ return match;
70713+ else
70714+ return NULL;
70715+}
70716+
70717+static struct acl_object_label *
70718+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
70719+ const struct acl_subject_label *subj)
70720+{
70721+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70722+ struct acl_object_label *match;
70723+
70724+ match = subj->obj_hash[index];
70725+
70726+ while (match && (match->inode != ino || match->device != dev ||
70727+ (match->mode & GR_DELETED))) {
70728+ match = match->next;
70729+ }
70730+
70731+ if (match && !(match->mode & GR_DELETED))
70732+ return match;
70733+ else
70734+ return NULL;
70735+}
70736+
70737+static struct acl_object_label *
70738+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
70739+ const struct acl_subject_label *subj)
70740+{
70741+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70742+ struct acl_object_label *match;
70743+
70744+ match = subj->obj_hash[index];
70745+
70746+ while (match && (match->inode != ino || match->device != dev ||
70747+ !(match->mode & GR_DELETED))) {
70748+ match = match->next;
70749+ }
70750+
70751+ if (match && (match->mode & GR_DELETED))
70752+ return match;
70753+
70754+ match = subj->obj_hash[index];
70755+
70756+ while (match && (match->inode != ino || match->device != dev ||
70757+ (match->mode & GR_DELETED))) {
70758+ match = match->next;
70759+ }
70760+
70761+ if (match && !(match->mode & GR_DELETED))
70762+ return match;
70763+ else
70764+ return NULL;
70765+}
70766+
70767+struct name_entry *
70768+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70769+{
70770+ unsigned int len = strlen(name);
70771+ unsigned int key = full_name_hash(name, len);
70772+ unsigned int index = key % state->name_set.n_size;
70773+ struct name_entry *match;
70774+
70775+ match = state->name_set.n_hash[index];
70776+
70777+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70778+ match = match->next;
70779+
70780+ return match;
70781+}
70782+
70783+static struct name_entry *
70784+lookup_name_entry(const char *name)
70785+{
70786+ return __lookup_name_entry(&running_polstate, name);
70787+}
70788+
70789+static struct name_entry *
70790+lookup_name_entry_create(const char *name)
70791+{
70792+ unsigned int len = strlen(name);
70793+ unsigned int key = full_name_hash(name, len);
70794+ unsigned int index = key % running_polstate.name_set.n_size;
70795+ struct name_entry *match;
70796+
70797+ match = running_polstate.name_set.n_hash[index];
70798+
70799+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70800+ !match->deleted))
70801+ match = match->next;
70802+
70803+ if (match && match->deleted)
70804+ return match;
70805+
70806+ match = running_polstate.name_set.n_hash[index];
70807+
70808+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70809+ match->deleted))
70810+ match = match->next;
70811+
70812+ if (match && !match->deleted)
70813+ return match;
70814+ else
70815+ return NULL;
70816+}
70817+
70818+static struct inodev_entry *
70819+lookup_inodev_entry(const ino_t ino, const dev_t dev)
70820+{
70821+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70822+ struct inodev_entry *match;
70823+
70824+ match = running_polstate.inodev_set.i_hash[index];
70825+
70826+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70827+ match = match->next;
70828+
70829+ return match;
70830+}
70831+
70832+void
70833+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70834+{
70835+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70836+ state->inodev_set.i_size);
70837+ struct inodev_entry **curr;
70838+
70839+ entry->prev = NULL;
70840+
70841+ curr = &state->inodev_set.i_hash[index];
70842+ if (*curr != NULL)
70843+ (*curr)->prev = entry;
70844+
70845+ entry->next = *curr;
70846+ *curr = entry;
70847+
70848+ return;
70849+}
70850+
70851+static void
70852+insert_inodev_entry(struct inodev_entry *entry)
70853+{
70854+ __insert_inodev_entry(&running_polstate, entry);
70855+}
70856+
70857+void
70858+insert_acl_obj_label(struct acl_object_label *obj,
70859+ struct acl_subject_label *subj)
70860+{
70861+ unsigned int index =
70862+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70863+ struct acl_object_label **curr;
70864+
70865+ obj->prev = NULL;
70866+
70867+ curr = &subj->obj_hash[index];
70868+ if (*curr != NULL)
70869+ (*curr)->prev = obj;
70870+
70871+ obj->next = *curr;
70872+ *curr = obj;
70873+
70874+ return;
70875+}
70876+
70877+void
70878+insert_acl_subj_label(struct acl_subject_label *obj,
70879+ struct acl_role_label *role)
70880+{
70881+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70882+ struct acl_subject_label **curr;
70883+
70884+ obj->prev = NULL;
70885+
70886+ curr = &role->subj_hash[index];
70887+ if (*curr != NULL)
70888+ (*curr)->prev = obj;
70889+
70890+ obj->next = *curr;
70891+ *curr = obj;
70892+
70893+ return;
70894+}
70895+
70896+/* derived from glibc fnmatch() 0: match, 1: no match*/
70897+
70898+static int
70899+glob_match(const char *p, const char *n)
70900+{
70901+ char c;
70902+
70903+ while ((c = *p++) != '\0') {
70904+ switch (c) {
70905+ case '?':
70906+ if (*n == '\0')
70907+ return 1;
70908+ else if (*n == '/')
70909+ return 1;
70910+ break;
70911+ case '\\':
70912+ if (*n != c)
70913+ return 1;
70914+ break;
70915+ case '*':
70916+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70917+ if (*n == '/')
70918+ return 1;
70919+ else if (c == '?') {
70920+ if (*n == '\0')
70921+ return 1;
70922+ else
70923+ ++n;
70924+ }
70925+ }
70926+ if (c == '\0') {
70927+ return 0;
70928+ } else {
70929+ const char *endp;
70930+
70931+ if ((endp = strchr(n, '/')) == NULL)
70932+ endp = n + strlen(n);
70933+
70934+ if (c == '[') {
70935+ for (--p; n < endp; ++n)
70936+ if (!glob_match(p, n))
70937+ return 0;
70938+ } else if (c == '/') {
70939+ while (*n != '\0' && *n != '/')
70940+ ++n;
70941+ if (*n == '/' && !glob_match(p, n + 1))
70942+ return 0;
70943+ } else {
70944+ for (--p; n < endp; ++n)
70945+ if (*n == c && !glob_match(p, n))
70946+ return 0;
70947+ }
70948+
70949+ return 1;
70950+ }
70951+ case '[':
70952+ {
70953+ int not;
70954+ char cold;
70955+
70956+ if (*n == '\0' || *n == '/')
70957+ return 1;
70958+
70959+ not = (*p == '!' || *p == '^');
70960+ if (not)
70961+ ++p;
70962+
70963+ c = *p++;
70964+ for (;;) {
70965+ unsigned char fn = (unsigned char)*n;
70966+
70967+ if (c == '\0')
70968+ return 1;
70969+ else {
70970+ if (c == fn)
70971+ goto matched;
70972+ cold = c;
70973+ c = *p++;
70974+
70975+ if (c == '-' && *p != ']') {
70976+ unsigned char cend = *p++;
70977+
70978+ if (cend == '\0')
70979+ return 1;
70980+
70981+ if (cold <= fn && fn <= cend)
70982+ goto matched;
70983+
70984+ c = *p++;
70985+ }
70986+ }
70987+
70988+ if (c == ']')
70989+ break;
70990+ }
70991+ if (!not)
70992+ return 1;
70993+ break;
70994+ matched:
70995+ while (c != ']') {
70996+ if (c == '\0')
70997+ return 1;
70998+
70999+ c = *p++;
71000+ }
71001+ if (not)
71002+ return 1;
71003+ }
71004+ break;
71005+ default:
71006+ if (c != *n)
71007+ return 1;
71008+ }
71009+
71010+ ++n;
71011+ }
71012+
71013+ if (*n == '\0')
71014+ return 0;
71015+
71016+ if (*n == '/')
71017+ return 0;
71018+
71019+ return 1;
71020+}
71021+
71022+static struct acl_object_label *
71023+chk_glob_label(struct acl_object_label *globbed,
71024+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
71025+{
71026+ struct acl_object_label *tmp;
71027+
71028+ if (*path == NULL)
71029+ *path = gr_to_filename_nolock(dentry, mnt);
71030+
71031+ tmp = globbed;
71032+
71033+ while (tmp) {
71034+ if (!glob_match(tmp->filename, *path))
71035+ return tmp;
71036+ tmp = tmp->next;
71037+ }
71038+
71039+ return NULL;
71040+}
71041+
71042+static struct acl_object_label *
71043+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71044+ const ino_t curr_ino, const dev_t curr_dev,
71045+ const struct acl_subject_label *subj, char **path, const int checkglob)
71046+{
71047+ struct acl_subject_label *tmpsubj;
71048+ struct acl_object_label *retval;
71049+ struct acl_object_label *retval2;
71050+
71051+ tmpsubj = (struct acl_subject_label *) subj;
71052+ read_lock(&gr_inode_lock);
71053+ do {
71054+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
71055+ if (retval) {
71056+ if (checkglob && retval->globbed) {
71057+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
71058+ if (retval2)
71059+ retval = retval2;
71060+ }
71061+ break;
71062+ }
71063+ } while ((tmpsubj = tmpsubj->parent_subject));
71064+ read_unlock(&gr_inode_lock);
71065+
71066+ return retval;
71067+}
71068+
71069+static __inline__ struct acl_object_label *
71070+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
71071+ struct dentry *curr_dentry,
71072+ const struct acl_subject_label *subj, char **path, const int checkglob)
71073+{
71074+ int newglob = checkglob;
71075+ ino_t inode;
71076+ dev_t device;
71077+
71078+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
71079+ as we don't want a / * rule to match instead of the / object
71080+ don't do this for create lookups that call this function though, since they're looking up
71081+ on the parent and thus need globbing checks on all paths
71082+ */
71083+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
71084+ newglob = GR_NO_GLOB;
71085+
71086+ spin_lock(&curr_dentry->d_lock);
71087+ inode = curr_dentry->d_inode->i_ino;
71088+ device = __get_dev(curr_dentry);
71089+ spin_unlock(&curr_dentry->d_lock);
71090+
71091+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
71092+}
71093+
71094+#ifdef CONFIG_HUGETLBFS
71095+static inline bool
71096+is_hugetlbfs_mnt(const struct vfsmount *mnt)
71097+{
71098+ int i;
71099+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
71100+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
71101+ return true;
71102+ }
71103+
71104+ return false;
71105+}
71106+#endif
71107+
71108+static struct acl_object_label *
71109+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71110+ const struct acl_subject_label *subj, char *path, const int checkglob)
71111+{
71112+ struct dentry *dentry = (struct dentry *) l_dentry;
71113+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71114+ struct mount *real_mnt = real_mount(mnt);
71115+ struct acl_object_label *retval;
71116+ struct dentry *parent;
71117+
71118+ read_seqlock_excl(&mount_lock);
71119+ write_seqlock(&rename_lock);
71120+
71121+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
71122+#ifdef CONFIG_NET
71123+ mnt == sock_mnt ||
71124+#endif
71125+#ifdef CONFIG_HUGETLBFS
71126+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
71127+#endif
71128+ /* ignore Eric Biederman */
71129+ IS_PRIVATE(l_dentry->d_inode))) {
71130+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
71131+ goto out;
71132+ }
71133+
71134+ for (;;) {
71135+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71136+ break;
71137+
71138+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71139+ if (!mnt_has_parent(real_mnt))
71140+ break;
71141+
71142+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71143+ if (retval != NULL)
71144+ goto out;
71145+
71146+ dentry = real_mnt->mnt_mountpoint;
71147+ real_mnt = real_mnt->mnt_parent;
71148+ mnt = &real_mnt->mnt;
71149+ continue;
71150+ }
71151+
71152+ parent = dentry->d_parent;
71153+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71154+ if (retval != NULL)
71155+ goto out;
71156+
71157+ dentry = parent;
71158+ }
71159+
71160+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
71161+
71162+ /* gr_real_root is pinned so we don't have to hold a reference */
71163+ if (retval == NULL)
71164+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
71165+out:
71166+ write_sequnlock(&rename_lock);
71167+ read_sequnlock_excl(&mount_lock);
71168+
71169+ BUG_ON(retval == NULL);
71170+
71171+ return retval;
71172+}
71173+
71174+static __inline__ struct acl_object_label *
71175+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71176+ const struct acl_subject_label *subj)
71177+{
71178+ char *path = NULL;
71179+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
71180+}
71181+
71182+static __inline__ struct acl_object_label *
71183+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71184+ const struct acl_subject_label *subj)
71185+{
71186+ char *path = NULL;
71187+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
71188+}
71189+
71190+static __inline__ struct acl_object_label *
71191+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71192+ const struct acl_subject_label *subj, char *path)
71193+{
71194+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
71195+}
71196+
71197+struct acl_subject_label *
71198+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
71199+ const struct acl_role_label *role)
71200+{
71201+ struct dentry *dentry = (struct dentry *) l_dentry;
71202+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
71203+ struct mount *real_mnt = real_mount(mnt);
71204+ struct acl_subject_label *retval;
71205+ struct dentry *parent;
71206+
71207+ read_seqlock_excl(&mount_lock);
71208+ write_seqlock(&rename_lock);
71209+
71210+ for (;;) {
71211+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
71212+ break;
71213+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
71214+ if (!mnt_has_parent(real_mnt))
71215+ break;
71216+
71217+ spin_lock(&dentry->d_lock);
71218+ read_lock(&gr_inode_lock);
71219+ retval =
71220+ lookup_acl_subj_label(dentry->d_inode->i_ino,
71221+ __get_dev(dentry), role);
71222+ read_unlock(&gr_inode_lock);
71223+ spin_unlock(&dentry->d_lock);
71224+ if (retval != NULL)
71225+ goto out;
71226+
71227+ dentry = real_mnt->mnt_mountpoint;
71228+ real_mnt = real_mnt->mnt_parent;
71229+ mnt = &real_mnt->mnt;
71230+ continue;
71231+ }
71232+
71233+ spin_lock(&dentry->d_lock);
71234+ read_lock(&gr_inode_lock);
71235+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71236+ __get_dev(dentry), role);
71237+ read_unlock(&gr_inode_lock);
71238+ parent = dentry->d_parent;
71239+ spin_unlock(&dentry->d_lock);
71240+
71241+ if (retval != NULL)
71242+ goto out;
71243+
71244+ dentry = parent;
71245+ }
71246+
71247+ spin_lock(&dentry->d_lock);
71248+ read_lock(&gr_inode_lock);
71249+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
71250+ __get_dev(dentry), role);
71251+ read_unlock(&gr_inode_lock);
71252+ spin_unlock(&dentry->d_lock);
71253+
71254+ if (unlikely(retval == NULL)) {
71255+ /* gr_real_root is pinned, we don't need to hold a reference */
71256+ read_lock(&gr_inode_lock);
71257+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
71258+ __get_dev(gr_real_root.dentry), role);
71259+ read_unlock(&gr_inode_lock);
71260+ }
71261+out:
71262+ write_sequnlock(&rename_lock);
71263+ read_sequnlock_excl(&mount_lock);
71264+
71265+ BUG_ON(retval == NULL);
71266+
71267+ return retval;
71268+}
71269+
71270+void
71271+assign_special_role(const char *rolename)
71272+{
71273+ struct acl_object_label *obj;
71274+ struct acl_role_label *r;
71275+ struct acl_role_label *assigned = NULL;
71276+ struct task_struct *tsk;
71277+ struct file *filp;
71278+
71279+ FOR_EACH_ROLE_START(r)
71280+ if (!strcmp(rolename, r->rolename) &&
71281+ (r->roletype & GR_ROLE_SPECIAL)) {
71282+ assigned = r;
71283+ break;
71284+ }
71285+ FOR_EACH_ROLE_END(r)
71286+
71287+ if (!assigned)
71288+ return;
71289+
71290+ read_lock(&tasklist_lock);
71291+ read_lock(&grsec_exec_file_lock);
71292+
71293+ tsk = current->real_parent;
71294+ if (tsk == NULL)
71295+ goto out_unlock;
71296+
71297+ filp = tsk->exec_file;
71298+ if (filp == NULL)
71299+ goto out_unlock;
71300+
71301+ tsk->is_writable = 0;
71302+ tsk->inherited = 0;
71303+
71304+ tsk->acl_sp_role = 1;
71305+ tsk->acl_role_id = ++acl_sp_role_value;
71306+ tsk->role = assigned;
71307+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
71308+
71309+ /* ignore additional mmap checks for processes that are writable
71310+ by the default ACL */
71311+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71312+ if (unlikely(obj->mode & GR_WRITE))
71313+ tsk->is_writable = 1;
71314+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
71315+ if (unlikely(obj->mode & GR_WRITE))
71316+ tsk->is_writable = 1;
71317+
71318+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71319+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
71320+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
71321+#endif
71322+
71323+out_unlock:
71324+ read_unlock(&grsec_exec_file_lock);
71325+ read_unlock(&tasklist_lock);
71326+ return;
71327+}
71328+
71329+
71330+static void
71331+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
71332+{
71333+ struct task_struct *task = current;
71334+ const struct cred *cred = current_cred();
71335+
71336+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
71337+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71338+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71339+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
71340+
71341+ return;
71342+}
71343+
71344+static void
71345+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
71346+{
71347+ struct task_struct *task = current;
71348+ const struct cred *cred = current_cred();
71349+
71350+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71351+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71352+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71353+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
71354+
71355+ return;
71356+}
71357+
71358+static void
71359+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
71360+{
71361+ struct task_struct *task = current;
71362+ const struct cred *cred = current_cred();
71363+
71364+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
71365+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
71366+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
71367+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
71368+
71369+ return;
71370+}
71371+
71372+static void
71373+gr_set_proc_res(struct task_struct *task)
71374+{
71375+ struct acl_subject_label *proc;
71376+ unsigned short i;
71377+
71378+ proc = task->acl;
71379+
71380+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
71381+ return;
71382+
71383+ for (i = 0; i < RLIM_NLIMITS; i++) {
71384+ if (!(proc->resmask & (1U << i)))
71385+ continue;
71386+
71387+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
71388+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
71389+
71390+ if (i == RLIMIT_CPU)
71391+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
71392+ }
71393+
71394+ return;
71395+}
71396+
71397+/* both of the below must be called with
71398+ rcu_read_lock();
71399+ read_lock(&tasklist_lock);
71400+ read_lock(&grsec_exec_file_lock);
71401+*/
71402+
71403+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
71404+{
71405+ char *tmpname;
71406+ struct acl_subject_label *tmpsubj;
71407+ struct file *filp;
71408+ struct name_entry *nmatch;
71409+
71410+ filp = task->exec_file;
71411+ if (filp == NULL)
71412+ return NULL;
71413+
71414+ /* the following is to apply the correct subject
71415+ on binaries running when the RBAC system
71416+ is enabled, when the binaries have been
71417+ replaced or deleted since their execution
71418+ -----
71419+ when the RBAC system starts, the inode/dev
71420+ from exec_file will be one the RBAC system
71421+ is unaware of. It only knows the inode/dev
71422+ of the present file on disk, or the absence
71423+ of it.
71424+ */
71425+
71426+ if (filename)
71427+ nmatch = __lookup_name_entry(state, filename);
71428+ else {
71429+ preempt_disable();
71430+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
71431+
71432+ nmatch = __lookup_name_entry(state, tmpname);
71433+ preempt_enable();
71434+ }
71435+ tmpsubj = NULL;
71436+ if (nmatch) {
71437+ if (nmatch->deleted)
71438+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
71439+ else
71440+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
71441+ }
71442+ /* this also works for the reload case -- if we don't match a potentially inherited subject
71443+ then we fall back to a normal lookup based on the binary's ino/dev
71444+ */
71445+ if (tmpsubj == NULL)
71446+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
71447+
71448+ return tmpsubj;
71449+}
71450+
71451+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
71452+{
71453+ return __gr_get_subject_for_task(&running_polstate, task, filename);
71454+}
71455+
71456+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
71457+{
71458+ struct acl_object_label *obj;
71459+ struct file *filp;
71460+
71461+ filp = task->exec_file;
71462+
71463+ task->acl = subj;
71464+ task->is_writable = 0;
71465+ /* ignore additional mmap checks for processes that are writable
71466+ by the default ACL */
71467+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
71468+ if (unlikely(obj->mode & GR_WRITE))
71469+ task->is_writable = 1;
71470+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71471+ if (unlikely(obj->mode & GR_WRITE))
71472+ task->is_writable = 1;
71473+
71474+ gr_set_proc_res(task);
71475+
71476+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71477+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71478+#endif
71479+}
71480+
71481+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
71482+{
71483+ __gr_apply_subject_to_task(&running_polstate, task, subj);
71484+}
71485+
71486+__u32
71487+gr_search_file(const struct dentry * dentry, const __u32 mode,
71488+ const struct vfsmount * mnt)
71489+{
71490+ __u32 retval = mode;
71491+ struct acl_subject_label *curracl;
71492+ struct acl_object_label *currobj;
71493+
71494+ if (unlikely(!(gr_status & GR_READY)))
71495+ return (mode & ~GR_AUDITS);
71496+
71497+ curracl = current->acl;
71498+
71499+ currobj = chk_obj_label(dentry, mnt, curracl);
71500+ retval = currobj->mode & mode;
71501+
71502+ /* if we're opening a specified transfer file for writing
71503+ (e.g. /dev/initctl), then transfer our role to init
71504+ */
71505+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
71506+ current->role->roletype & GR_ROLE_PERSIST)) {
71507+ struct task_struct *task = init_pid_ns.child_reaper;
71508+
71509+ if (task->role != current->role) {
71510+ struct acl_subject_label *subj;
71511+
71512+ task->acl_sp_role = 0;
71513+ task->acl_role_id = current->acl_role_id;
71514+ task->role = current->role;
71515+ rcu_read_lock();
71516+ read_lock(&grsec_exec_file_lock);
71517+ subj = gr_get_subject_for_task(task, NULL);
71518+ gr_apply_subject_to_task(task, subj);
71519+ read_unlock(&grsec_exec_file_lock);
71520+ rcu_read_unlock();
71521+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
71522+ }
71523+ }
71524+
71525+ if (unlikely
71526+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
71527+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
71528+ __u32 new_mode = mode;
71529+
71530+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71531+
71532+ retval = new_mode;
71533+
71534+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
71535+ new_mode |= GR_INHERIT;
71536+
71537+ if (!(mode & GR_NOLEARN))
71538+ gr_log_learn(dentry, mnt, new_mode);
71539+ }
71540+
71541+ return retval;
71542+}
71543+
71544+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
71545+ const struct dentry *parent,
71546+ const struct vfsmount *mnt)
71547+{
71548+ struct name_entry *match;
71549+ struct acl_object_label *matchpo;
71550+ struct acl_subject_label *curracl;
71551+ char *path;
71552+
71553+ if (unlikely(!(gr_status & GR_READY)))
71554+ return NULL;
71555+
71556+ preempt_disable();
71557+ path = gr_to_filename_rbac(new_dentry, mnt);
71558+ match = lookup_name_entry_create(path);
71559+
71560+ curracl = current->acl;
71561+
71562+ if (match) {
71563+ read_lock(&gr_inode_lock);
71564+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
71565+ read_unlock(&gr_inode_lock);
71566+
71567+ if (matchpo) {
71568+ preempt_enable();
71569+ return matchpo;
71570+ }
71571+ }
71572+
71573+ // lookup parent
71574+
71575+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
71576+
71577+ preempt_enable();
71578+ return matchpo;
71579+}
71580+
71581+__u32
71582+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
71583+ const struct vfsmount * mnt, const __u32 mode)
71584+{
71585+ struct acl_object_label *matchpo;
71586+ __u32 retval;
71587+
71588+ if (unlikely(!(gr_status & GR_READY)))
71589+ return (mode & ~GR_AUDITS);
71590+
71591+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
71592+
71593+ retval = matchpo->mode & mode;
71594+
71595+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
71596+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71597+ __u32 new_mode = mode;
71598+
71599+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
71600+
71601+ gr_log_learn(new_dentry, mnt, new_mode);
71602+ return new_mode;
71603+ }
71604+
71605+ return retval;
71606+}
71607+
71608+__u32
71609+gr_check_link(const struct dentry * new_dentry,
71610+ const struct dentry * parent_dentry,
71611+ const struct vfsmount * parent_mnt,
71612+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
71613+{
71614+ struct acl_object_label *obj;
71615+ __u32 oldmode, newmode;
71616+ __u32 needmode;
71617+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
71618+ GR_DELETE | GR_INHERIT;
71619+
71620+ if (unlikely(!(gr_status & GR_READY)))
71621+ return (GR_CREATE | GR_LINK);
71622+
71623+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
71624+ oldmode = obj->mode;
71625+
71626+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
71627+ newmode = obj->mode;
71628+
71629+ needmode = newmode & checkmodes;
71630+
71631+ // old name for hardlink must have at least the permissions of the new name
71632+ if ((oldmode & needmode) != needmode)
71633+ goto bad;
71634+
71635+ // if old name had restrictions/auditing, make sure the new name does as well
71636+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
71637+
71638+ // don't allow hardlinking of suid/sgid/fcapped files without permission
71639+ if (is_privileged_binary(old_dentry))
71640+ needmode |= GR_SETID;
71641+
71642+ if ((newmode & needmode) != needmode)
71643+ goto bad;
71644+
71645+ // enforce minimum permissions
71646+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
71647+ return newmode;
71648+bad:
71649+ needmode = oldmode;
71650+ if (is_privileged_binary(old_dentry))
71651+ needmode |= GR_SETID;
71652+
71653+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
71654+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
71655+ return (GR_CREATE | GR_LINK);
71656+ } else if (newmode & GR_SUPPRESS)
71657+ return GR_SUPPRESS;
71658+ else
71659+ return 0;
71660+}
71661+
71662+int
71663+gr_check_hidden_task(const struct task_struct *task)
71664+{
71665+ if (unlikely(!(gr_status & GR_READY)))
71666+ return 0;
71667+
71668+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
71669+ return 1;
71670+
71671+ return 0;
71672+}
71673+
71674+int
71675+gr_check_protected_task(const struct task_struct *task)
71676+{
71677+ if (unlikely(!(gr_status & GR_READY) || !task))
71678+ return 0;
71679+
71680+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71681+ task->acl != current->acl)
71682+ return 1;
71683+
71684+ return 0;
71685+}
71686+
71687+int
71688+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
71689+{
71690+ struct task_struct *p;
71691+ int ret = 0;
71692+
71693+ if (unlikely(!(gr_status & GR_READY) || !pid))
71694+ return ret;
71695+
71696+ read_lock(&tasklist_lock);
71697+ do_each_pid_task(pid, type, p) {
71698+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71699+ p->acl != current->acl) {
71700+ ret = 1;
71701+ goto out;
71702+ }
71703+ } while_each_pid_task(pid, type, p);
71704+out:
71705+ read_unlock(&tasklist_lock);
71706+
71707+ return ret;
71708+}
71709+
71710+void
71711+gr_copy_label(struct task_struct *tsk)
71712+{
71713+ struct task_struct *p = current;
71714+
71715+ tsk->inherited = p->inherited;
71716+ tsk->acl_sp_role = 0;
71717+ tsk->acl_role_id = p->acl_role_id;
71718+ tsk->acl = p->acl;
71719+ tsk->role = p->role;
71720+ tsk->signal->used_accept = 0;
71721+ tsk->signal->curr_ip = p->signal->curr_ip;
71722+ tsk->signal->saved_ip = p->signal->saved_ip;
71723+ if (p->exec_file)
71724+ get_file(p->exec_file);
71725+ tsk->exec_file = p->exec_file;
71726+ tsk->is_writable = p->is_writable;
71727+ if (unlikely(p->signal->used_accept)) {
71728+ p->signal->curr_ip = 0;
71729+ p->signal->saved_ip = 0;
71730+ }
71731+
71732+ return;
71733+}
71734+
71735+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71736+
71737+int
71738+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71739+{
71740+ unsigned int i;
71741+ __u16 num;
71742+ uid_t *uidlist;
71743+ uid_t curuid;
71744+ int realok = 0;
71745+ int effectiveok = 0;
71746+ int fsok = 0;
71747+ uid_t globalreal, globaleffective, globalfs;
71748+
71749+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71750+ struct user_struct *user;
71751+
71752+ if (!uid_valid(real))
71753+ goto skipit;
71754+
71755+ /* find user based on global namespace */
71756+
71757+ globalreal = GR_GLOBAL_UID(real);
71758+
71759+ user = find_user(make_kuid(&init_user_ns, globalreal));
71760+ if (user == NULL)
71761+ goto skipit;
71762+
71763+ if (gr_process_kernel_setuid_ban(user)) {
71764+ /* for find_user */
71765+ free_uid(user);
71766+ return 1;
71767+ }
71768+
71769+ /* for find_user */
71770+ free_uid(user);
71771+
71772+skipit:
71773+#endif
71774+
71775+ if (unlikely(!(gr_status & GR_READY)))
71776+ return 0;
71777+
71778+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71779+ gr_log_learn_uid_change(real, effective, fs);
71780+
71781+ num = current->acl->user_trans_num;
71782+ uidlist = current->acl->user_transitions;
71783+
71784+ if (uidlist == NULL)
71785+ return 0;
71786+
71787+ if (!uid_valid(real)) {
71788+ realok = 1;
71789+ globalreal = (uid_t)-1;
71790+ } else {
71791+ globalreal = GR_GLOBAL_UID(real);
71792+ }
71793+ if (!uid_valid(effective)) {
71794+ effectiveok = 1;
71795+ globaleffective = (uid_t)-1;
71796+ } else {
71797+ globaleffective = GR_GLOBAL_UID(effective);
71798+ }
71799+ if (!uid_valid(fs)) {
71800+ fsok = 1;
71801+ globalfs = (uid_t)-1;
71802+ } else {
71803+ globalfs = GR_GLOBAL_UID(fs);
71804+ }
71805+
71806+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71807+ for (i = 0; i < num; i++) {
71808+ curuid = uidlist[i];
71809+ if (globalreal == curuid)
71810+ realok = 1;
71811+ if (globaleffective == curuid)
71812+ effectiveok = 1;
71813+ if (globalfs == curuid)
71814+ fsok = 1;
71815+ }
71816+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71817+ for (i = 0; i < num; i++) {
71818+ curuid = uidlist[i];
71819+ if (globalreal == curuid)
71820+ break;
71821+ if (globaleffective == curuid)
71822+ break;
71823+ if (globalfs == curuid)
71824+ break;
71825+ }
71826+ /* not in deny list */
71827+ if (i == num) {
71828+ realok = 1;
71829+ effectiveok = 1;
71830+ fsok = 1;
71831+ }
71832+ }
71833+
71834+ if (realok && effectiveok && fsok)
71835+ return 0;
71836+ else {
71837+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71838+ return 1;
71839+ }
71840+}
71841+
71842+int
71843+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71844+{
71845+ unsigned int i;
71846+ __u16 num;
71847+ gid_t *gidlist;
71848+ gid_t curgid;
71849+ int realok = 0;
71850+ int effectiveok = 0;
71851+ int fsok = 0;
71852+ gid_t globalreal, globaleffective, globalfs;
71853+
71854+ if (unlikely(!(gr_status & GR_READY)))
71855+ return 0;
71856+
71857+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71858+ gr_log_learn_gid_change(real, effective, fs);
71859+
71860+ num = current->acl->group_trans_num;
71861+ gidlist = current->acl->group_transitions;
71862+
71863+ if (gidlist == NULL)
71864+ return 0;
71865+
71866+ if (!gid_valid(real)) {
71867+ realok = 1;
71868+ globalreal = (gid_t)-1;
71869+ } else {
71870+ globalreal = GR_GLOBAL_GID(real);
71871+ }
71872+ if (!gid_valid(effective)) {
71873+ effectiveok = 1;
71874+ globaleffective = (gid_t)-1;
71875+ } else {
71876+ globaleffective = GR_GLOBAL_GID(effective);
71877+ }
71878+ if (!gid_valid(fs)) {
71879+ fsok = 1;
71880+ globalfs = (gid_t)-1;
71881+ } else {
71882+ globalfs = GR_GLOBAL_GID(fs);
71883+ }
71884+
71885+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71886+ for (i = 0; i < num; i++) {
71887+ curgid = gidlist[i];
71888+ if (globalreal == curgid)
71889+ realok = 1;
71890+ if (globaleffective == curgid)
71891+ effectiveok = 1;
71892+ if (globalfs == curgid)
71893+ fsok = 1;
71894+ }
71895+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71896+ for (i = 0; i < num; i++) {
71897+ curgid = gidlist[i];
71898+ if (globalreal == curgid)
71899+ break;
71900+ if (globaleffective == curgid)
71901+ break;
71902+ if (globalfs == curgid)
71903+ break;
71904+ }
71905+ /* not in deny list */
71906+ if (i == num) {
71907+ realok = 1;
71908+ effectiveok = 1;
71909+ fsok = 1;
71910+ }
71911+ }
71912+
71913+ if (realok && effectiveok && fsok)
71914+ return 0;
71915+ else {
71916+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71917+ return 1;
71918+ }
71919+}
71920+
71921+extern int gr_acl_is_capable(const int cap);
71922+
71923+void
71924+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71925+{
71926+ struct acl_role_label *role = task->role;
71927+ struct acl_subject_label *subj = NULL;
71928+ struct acl_object_label *obj;
71929+ struct file *filp;
71930+ uid_t uid;
71931+ gid_t gid;
71932+
71933+ if (unlikely(!(gr_status & GR_READY)))
71934+ return;
71935+
71936+ uid = GR_GLOBAL_UID(kuid);
71937+ gid = GR_GLOBAL_GID(kgid);
71938+
71939+ filp = task->exec_file;
71940+
71941+ /* kernel process, we'll give them the kernel role */
71942+ if (unlikely(!filp)) {
71943+ task->role = running_polstate.kernel_role;
71944+ task->acl = running_polstate.kernel_role->root_label;
71945+ return;
71946+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71947+ /* save the current ip at time of role lookup so that the proper
71948+ IP will be learned for role_allowed_ip */
71949+ task->signal->saved_ip = task->signal->curr_ip;
71950+ role = lookup_acl_role_label(task, uid, gid);
71951+ }
71952+
71953+ /* don't change the role if we're not a privileged process */
71954+ if (role && task->role != role &&
71955+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71956+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71957+ return;
71958+
71959+ /* perform subject lookup in possibly new role
71960+ we can use this result below in the case where role == task->role
71961+ */
71962+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71963+
71964+ /* if we changed uid/gid, but result in the same role
71965+ and are using inheritance, don't lose the inherited subject
71966+ if current subject is other than what normal lookup
71967+ would result in, we arrived via inheritance, don't
71968+ lose subject
71969+ */
71970+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
71971+ (subj == task->acl)))
71972+ task->acl = subj;
71973+
71974+ /* leave task->inherited unaffected */
71975+
71976+ task->role = role;
71977+
71978+ task->is_writable = 0;
71979+
71980+ /* ignore additional mmap checks for processes that are writable
71981+ by the default ACL */
71982+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71983+ if (unlikely(obj->mode & GR_WRITE))
71984+ task->is_writable = 1;
71985+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71986+ if (unlikely(obj->mode & GR_WRITE))
71987+ task->is_writable = 1;
71988+
71989+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71990+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71991+#endif
71992+
71993+ gr_set_proc_res(task);
71994+
71995+ return;
71996+}
71997+
71998+int
71999+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72000+ const int unsafe_flags)
72001+{
72002+ struct task_struct *task = current;
72003+ struct acl_subject_label *newacl;
72004+ struct acl_object_label *obj;
72005+ __u32 retmode;
72006+
72007+ if (unlikely(!(gr_status & GR_READY)))
72008+ return 0;
72009+
72010+ newacl = chk_subj_label(dentry, mnt, task->role);
72011+
72012+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
72013+ did an exec
72014+ */
72015+ rcu_read_lock();
72016+ read_lock(&tasklist_lock);
72017+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
72018+ (task->parent->acl->mode & GR_POVERRIDE))) {
72019+ read_unlock(&tasklist_lock);
72020+ rcu_read_unlock();
72021+ goto skip_check;
72022+ }
72023+ read_unlock(&tasklist_lock);
72024+ rcu_read_unlock();
72025+
72026+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
72027+ !(task->role->roletype & GR_ROLE_GOD) &&
72028+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
72029+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
72030+ if (unsafe_flags & LSM_UNSAFE_SHARE)
72031+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
72032+ else
72033+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
72034+ return -EACCES;
72035+ }
72036+
72037+skip_check:
72038+
72039+ obj = chk_obj_label(dentry, mnt, task->acl);
72040+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
72041+
72042+ if (!(task->acl->mode & GR_INHERITLEARN) &&
72043+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
72044+ if (obj->nested)
72045+ task->acl = obj->nested;
72046+ else
72047+ task->acl = newacl;
72048+ task->inherited = 0;
72049+ } else {
72050+ task->inherited = 1;
72051+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
72052+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
72053+ }
72054+
72055+ task->is_writable = 0;
72056+
72057+ /* ignore additional mmap checks for processes that are writable
72058+ by the default ACL */
72059+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
72060+ if (unlikely(obj->mode & GR_WRITE))
72061+ task->is_writable = 1;
72062+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
72063+ if (unlikely(obj->mode & GR_WRITE))
72064+ task->is_writable = 1;
72065+
72066+ gr_set_proc_res(task);
72067+
72068+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
72069+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
72070+#endif
72071+ return 0;
72072+}
72073+
72074+/* always called with valid inodev ptr */
72075+static void
72076+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
72077+{
72078+ struct acl_object_label *matchpo;
72079+ struct acl_subject_label *matchps;
72080+ struct acl_subject_label *subj;
72081+ struct acl_role_label *role;
72082+ unsigned int x;
72083+
72084+ FOR_EACH_ROLE_START(role)
72085+ FOR_EACH_SUBJECT_START(role, subj, x)
72086+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72087+ matchpo->mode |= GR_DELETED;
72088+ FOR_EACH_SUBJECT_END(subj,x)
72089+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72090+ /* nested subjects aren't in the role's subj_hash table */
72091+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
72092+ matchpo->mode |= GR_DELETED;
72093+ FOR_EACH_NESTED_SUBJECT_END(subj)
72094+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
72095+ matchps->mode |= GR_DELETED;
72096+ FOR_EACH_ROLE_END(role)
72097+
72098+ inodev->nentry->deleted = 1;
72099+
72100+ return;
72101+}
72102+
72103+void
72104+gr_handle_delete(const ino_t ino, const dev_t dev)
72105+{
72106+ struct inodev_entry *inodev;
72107+
72108+ if (unlikely(!(gr_status & GR_READY)))
72109+ return;
72110+
72111+ write_lock(&gr_inode_lock);
72112+ inodev = lookup_inodev_entry(ino, dev);
72113+ if (inodev != NULL)
72114+ do_handle_delete(inodev, ino, dev);
72115+ write_unlock(&gr_inode_lock);
72116+
72117+ return;
72118+}
72119+
72120+static void
72121+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
72122+ const ino_t newinode, const dev_t newdevice,
72123+ struct acl_subject_label *subj)
72124+{
72125+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
72126+ struct acl_object_label *match;
72127+
72128+ match = subj->obj_hash[index];
72129+
72130+ while (match && (match->inode != oldinode ||
72131+ match->device != olddevice ||
72132+ !(match->mode & GR_DELETED)))
72133+ match = match->next;
72134+
72135+ if (match && (match->inode == oldinode)
72136+ && (match->device == olddevice)
72137+ && (match->mode & GR_DELETED)) {
72138+ if (match->prev == NULL) {
72139+ subj->obj_hash[index] = match->next;
72140+ if (match->next != NULL)
72141+ match->next->prev = NULL;
72142+ } else {
72143+ match->prev->next = match->next;
72144+ if (match->next != NULL)
72145+ match->next->prev = match->prev;
72146+ }
72147+ match->prev = NULL;
72148+ match->next = NULL;
72149+ match->inode = newinode;
72150+ match->device = newdevice;
72151+ match->mode &= ~GR_DELETED;
72152+
72153+ insert_acl_obj_label(match, subj);
72154+ }
72155+
72156+ return;
72157+}
72158+
72159+static void
72160+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
72161+ const ino_t newinode, const dev_t newdevice,
72162+ struct acl_role_label *role)
72163+{
72164+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
72165+ struct acl_subject_label *match;
72166+
72167+ match = role->subj_hash[index];
72168+
72169+ while (match && (match->inode != oldinode ||
72170+ match->device != olddevice ||
72171+ !(match->mode & GR_DELETED)))
72172+ match = match->next;
72173+
72174+ if (match && (match->inode == oldinode)
72175+ && (match->device == olddevice)
72176+ && (match->mode & GR_DELETED)) {
72177+ if (match->prev == NULL) {
72178+ role->subj_hash[index] = match->next;
72179+ if (match->next != NULL)
72180+ match->next->prev = NULL;
72181+ } else {
72182+ match->prev->next = match->next;
72183+ if (match->next != NULL)
72184+ match->next->prev = match->prev;
72185+ }
72186+ match->prev = NULL;
72187+ match->next = NULL;
72188+ match->inode = newinode;
72189+ match->device = newdevice;
72190+ match->mode &= ~GR_DELETED;
72191+
72192+ insert_acl_subj_label(match, role);
72193+ }
72194+
72195+ return;
72196+}
72197+
72198+static void
72199+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
72200+ const ino_t newinode, const dev_t newdevice)
72201+{
72202+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
72203+ struct inodev_entry *match;
72204+
72205+ match = running_polstate.inodev_set.i_hash[index];
72206+
72207+ while (match && (match->nentry->inode != oldinode ||
72208+ match->nentry->device != olddevice || !match->nentry->deleted))
72209+ match = match->next;
72210+
72211+ if (match && (match->nentry->inode == oldinode)
72212+ && (match->nentry->device == olddevice) &&
72213+ match->nentry->deleted) {
72214+ if (match->prev == NULL) {
72215+ running_polstate.inodev_set.i_hash[index] = match->next;
72216+ if (match->next != NULL)
72217+ match->next->prev = NULL;
72218+ } else {
72219+ match->prev->next = match->next;
72220+ if (match->next != NULL)
72221+ match->next->prev = match->prev;
72222+ }
72223+ match->prev = NULL;
72224+ match->next = NULL;
72225+ match->nentry->inode = newinode;
72226+ match->nentry->device = newdevice;
72227+ match->nentry->deleted = 0;
72228+
72229+ insert_inodev_entry(match);
72230+ }
72231+
72232+ return;
72233+}
72234+
72235+static void
72236+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
72237+{
72238+ struct acl_subject_label *subj;
72239+ struct acl_role_label *role;
72240+ unsigned int x;
72241+
72242+ FOR_EACH_ROLE_START(role)
72243+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
72244+
72245+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
72246+ if ((subj->inode == ino) && (subj->device == dev)) {
72247+ subj->inode = ino;
72248+ subj->device = dev;
72249+ }
72250+ /* nested subjects aren't in the role's subj_hash table */
72251+ update_acl_obj_label(matchn->inode, matchn->device,
72252+ ino, dev, subj);
72253+ FOR_EACH_NESTED_SUBJECT_END(subj)
72254+ FOR_EACH_SUBJECT_START(role, subj, x)
72255+ update_acl_obj_label(matchn->inode, matchn->device,
72256+ ino, dev, subj);
72257+ FOR_EACH_SUBJECT_END(subj,x)
72258+ FOR_EACH_ROLE_END(role)
72259+
72260+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
72261+
72262+ return;
72263+}
72264+
72265+static void
72266+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
72267+ const struct vfsmount *mnt)
72268+{
72269+ ino_t ino = dentry->d_inode->i_ino;
72270+ dev_t dev = __get_dev(dentry);
72271+
72272+ __do_handle_create(matchn, ino, dev);
72273+
72274+ return;
72275+}
72276+
72277+void
72278+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72279+{
72280+ struct name_entry *matchn;
72281+
72282+ if (unlikely(!(gr_status & GR_READY)))
72283+ return;
72284+
72285+ preempt_disable();
72286+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
72287+
72288+ if (unlikely((unsigned long)matchn)) {
72289+ write_lock(&gr_inode_lock);
72290+ do_handle_create(matchn, dentry, mnt);
72291+ write_unlock(&gr_inode_lock);
72292+ }
72293+ preempt_enable();
72294+
72295+ return;
72296+}
72297+
72298+void
72299+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72300+{
72301+ struct name_entry *matchn;
72302+
72303+ if (unlikely(!(gr_status & GR_READY)))
72304+ return;
72305+
72306+ preempt_disable();
72307+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
72308+
72309+ if (unlikely((unsigned long)matchn)) {
72310+ write_lock(&gr_inode_lock);
72311+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
72312+ write_unlock(&gr_inode_lock);
72313+ }
72314+ preempt_enable();
72315+
72316+ return;
72317+}
72318+
72319+void
72320+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72321+ struct dentry *old_dentry,
72322+ struct dentry *new_dentry,
72323+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
72324+{
72325+ struct name_entry *matchn;
72326+ struct name_entry *matchn2 = NULL;
72327+ struct inodev_entry *inodev;
72328+ struct inode *inode = new_dentry->d_inode;
72329+ ino_t old_ino = old_dentry->d_inode->i_ino;
72330+ dev_t old_dev = __get_dev(old_dentry);
72331+ unsigned int exchange = flags & RENAME_EXCHANGE;
72332+
72333+ /* vfs_rename swaps the name and parent link for old_dentry and
72334+ new_dentry
72335+ at this point, old_dentry has the new name, parent link, and inode
72336+ for the renamed file
72337+ if a file is being replaced by a rename, new_dentry has the inode
72338+ and name for the replaced file
72339+ */
72340+
72341+ if (unlikely(!(gr_status & GR_READY)))
72342+ return;
72343+
72344+ preempt_disable();
72345+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
72346+
72347+ /* exchange cases:
72348+ a filename exists for the source, but not dest
72349+ do a recreate on source
72350+ a filename exists for the dest, but not source
72351+ do a recreate on dest
72352+ a filename exists for both source and dest
72353+ delete source and dest, then create source and dest
72354+ a filename exists for neither source nor dest
72355+ no updates needed
72356+
72357+ the name entry lookups get us the old inode/dev associated with
72358+ each name, so do the deletes first (if possible) so that when
72359+ we do the create, we pick up on the right entries
72360+ */
72361+
72362+ if (exchange)
72363+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
72364+
72365+ /* we wouldn't have to check d_inode if it weren't for
72366+ NFS silly-renaming
72367+ */
72368+
72369+ write_lock(&gr_inode_lock);
72370+ if (unlikely((replace || exchange) && inode)) {
72371+ ino_t new_ino = inode->i_ino;
72372+ dev_t new_dev = __get_dev(new_dentry);
72373+
72374+ inodev = lookup_inodev_entry(new_ino, new_dev);
72375+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
72376+ do_handle_delete(inodev, new_ino, new_dev);
72377+ }
72378+
72379+ inodev = lookup_inodev_entry(old_ino, old_dev);
72380+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
72381+ do_handle_delete(inodev, old_ino, old_dev);
72382+
72383+ if (unlikely(matchn != NULL))
72384+ do_handle_create(matchn, old_dentry, mnt);
72385+
72386+ if (unlikely(matchn2 != NULL))
72387+ do_handle_create(matchn2, new_dentry, mnt);
72388+
72389+ write_unlock(&gr_inode_lock);
72390+ preempt_enable();
72391+
72392+ return;
72393+}
72394+
72395+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
72396+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
72397+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
72398+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
72399+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
72400+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
72401+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
72402+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
72403+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
72404+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
72405+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
72406+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
72407+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
72408+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
72409+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
72410+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
72411+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
72412+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
72413+};
72414+
72415+void
72416+gr_learn_resource(const struct task_struct *task,
72417+ const int res, const unsigned long wanted, const int gt)
72418+{
72419+ struct acl_subject_label *acl;
72420+ const struct cred *cred;
72421+
72422+ if (unlikely((gr_status & GR_READY) &&
72423+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
72424+ goto skip_reslog;
72425+
72426+ gr_log_resource(task, res, wanted, gt);
72427+skip_reslog:
72428+
72429+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
72430+ return;
72431+
72432+ acl = task->acl;
72433+
72434+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
72435+ !(acl->resmask & (1U << (unsigned short) res))))
72436+ return;
72437+
72438+ if (wanted >= acl->res[res].rlim_cur) {
72439+ unsigned long res_add;
72440+
72441+ res_add = wanted + res_learn_bumps[res];
72442+
72443+ acl->res[res].rlim_cur = res_add;
72444+
72445+ if (wanted > acl->res[res].rlim_max)
72446+ acl->res[res].rlim_max = res_add;
72447+
72448+ /* only log the subject filename, since resource logging is supported for
72449+ single-subject learning only */
72450+ rcu_read_lock();
72451+ cred = __task_cred(task);
72452+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72453+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
72454+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
72455+ "", (unsigned long) res, &task->signal->saved_ip);
72456+ rcu_read_unlock();
72457+ }
72458+
72459+ return;
72460+}
72461+EXPORT_SYMBOL_GPL(gr_learn_resource);
72462+#endif
72463+
72464+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
72465+void
72466+pax_set_initial_flags(struct linux_binprm *bprm)
72467+{
72468+ struct task_struct *task = current;
72469+ struct acl_subject_label *proc;
72470+ unsigned long flags;
72471+
72472+ if (unlikely(!(gr_status & GR_READY)))
72473+ return;
72474+
72475+ flags = pax_get_flags(task);
72476+
72477+ proc = task->acl;
72478+
72479+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
72480+ flags &= ~MF_PAX_PAGEEXEC;
72481+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
72482+ flags &= ~MF_PAX_SEGMEXEC;
72483+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
72484+ flags &= ~MF_PAX_RANDMMAP;
72485+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
72486+ flags &= ~MF_PAX_EMUTRAMP;
72487+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
72488+ flags &= ~MF_PAX_MPROTECT;
72489+
72490+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
72491+ flags |= MF_PAX_PAGEEXEC;
72492+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
72493+ flags |= MF_PAX_SEGMEXEC;
72494+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
72495+ flags |= MF_PAX_RANDMMAP;
72496+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
72497+ flags |= MF_PAX_EMUTRAMP;
72498+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
72499+ flags |= MF_PAX_MPROTECT;
72500+
72501+ pax_set_flags(task, flags);
72502+
72503+ return;
72504+}
72505+#endif
72506+
72507+int
72508+gr_handle_proc_ptrace(struct task_struct *task)
72509+{
72510+ struct file *filp;
72511+ struct task_struct *tmp = task;
72512+ struct task_struct *curtemp = current;
72513+ __u32 retmode;
72514+
72515+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72516+ if (unlikely(!(gr_status & GR_READY)))
72517+ return 0;
72518+#endif
72519+
72520+ read_lock(&tasklist_lock);
72521+ read_lock(&grsec_exec_file_lock);
72522+ filp = task->exec_file;
72523+
72524+ while (task_pid_nr(tmp) > 0) {
72525+ if (tmp == curtemp)
72526+ break;
72527+ tmp = tmp->real_parent;
72528+ }
72529+
72530+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72531+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
72532+ read_unlock(&grsec_exec_file_lock);
72533+ read_unlock(&tasklist_lock);
72534+ return 1;
72535+ }
72536+
72537+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72538+ if (!(gr_status & GR_READY)) {
72539+ read_unlock(&grsec_exec_file_lock);
72540+ read_unlock(&tasklist_lock);
72541+ return 0;
72542+ }
72543+#endif
72544+
72545+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
72546+ read_unlock(&grsec_exec_file_lock);
72547+ read_unlock(&tasklist_lock);
72548+
72549+ if (retmode & GR_NOPTRACE)
72550+ return 1;
72551+
72552+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
72553+ && (current->acl != task->acl || (current->acl != current->role->root_label
72554+ && task_pid_nr(current) != task_pid_nr(task))))
72555+ return 1;
72556+
72557+ return 0;
72558+}
72559+
72560+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
72561+{
72562+ if (unlikely(!(gr_status & GR_READY)))
72563+ return;
72564+
72565+ if (!(current->role->roletype & GR_ROLE_GOD))
72566+ return;
72567+
72568+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
72569+ p->role->rolename, gr_task_roletype_to_char(p),
72570+ p->acl->filename);
72571+}
72572+
72573+int
72574+gr_handle_ptrace(struct task_struct *task, const long request)
72575+{
72576+ struct task_struct *tmp = task;
72577+ struct task_struct *curtemp = current;
72578+ __u32 retmode;
72579+
72580+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
72581+ if (unlikely(!(gr_status & GR_READY)))
72582+ return 0;
72583+#endif
72584+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
72585+ read_lock(&tasklist_lock);
72586+ while (task_pid_nr(tmp) > 0) {
72587+ if (tmp == curtemp)
72588+ break;
72589+ tmp = tmp->real_parent;
72590+ }
72591+
72592+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
72593+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
72594+ read_unlock(&tasklist_lock);
72595+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72596+ return 1;
72597+ }
72598+ read_unlock(&tasklist_lock);
72599+ }
72600+
72601+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
72602+ if (!(gr_status & GR_READY))
72603+ return 0;
72604+#endif
72605+
72606+ read_lock(&grsec_exec_file_lock);
72607+ if (unlikely(!task->exec_file)) {
72608+ read_unlock(&grsec_exec_file_lock);
72609+ return 0;
72610+ }
72611+
72612+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
72613+ read_unlock(&grsec_exec_file_lock);
72614+
72615+ if (retmode & GR_NOPTRACE) {
72616+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72617+ return 1;
72618+ }
72619+
72620+ if (retmode & GR_PTRACERD) {
72621+ switch (request) {
72622+ case PTRACE_SEIZE:
72623+ case PTRACE_POKETEXT:
72624+ case PTRACE_POKEDATA:
72625+ case PTRACE_POKEUSR:
72626+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
72627+ case PTRACE_SETREGS:
72628+ case PTRACE_SETFPREGS:
72629+#endif
72630+#ifdef CONFIG_X86
72631+ case PTRACE_SETFPXREGS:
72632+#endif
72633+#ifdef CONFIG_ALTIVEC
72634+ case PTRACE_SETVRREGS:
72635+#endif
72636+ return 1;
72637+ default:
72638+ return 0;
72639+ }
72640+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
72641+ !(current->role->roletype & GR_ROLE_GOD) &&
72642+ (current->acl != task->acl)) {
72643+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
72644+ return 1;
72645+ }
72646+
72647+ return 0;
72648+}
72649+
72650+static int is_writable_mmap(const struct file *filp)
72651+{
72652+ struct task_struct *task = current;
72653+ struct acl_object_label *obj, *obj2;
72654+
72655+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
72656+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
72657+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
72658+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
72659+ task->role->root_label);
72660+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
72661+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
72662+ return 1;
72663+ }
72664+ }
72665+ return 0;
72666+}
72667+
72668+int
72669+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
72670+{
72671+ __u32 mode;
72672+
72673+ if (unlikely(!file || !(prot & PROT_EXEC)))
72674+ return 1;
72675+
72676+ if (is_writable_mmap(file))
72677+ return 0;
72678+
72679+ mode =
72680+ gr_search_file(file->f_path.dentry,
72681+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72682+ file->f_path.mnt);
72683+
72684+ if (!gr_tpe_allow(file))
72685+ return 0;
72686+
72687+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72688+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72689+ return 0;
72690+ } else if (unlikely(!(mode & GR_EXEC))) {
72691+ return 0;
72692+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72693+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72694+ return 1;
72695+ }
72696+
72697+ return 1;
72698+}
72699+
72700+int
72701+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72702+{
72703+ __u32 mode;
72704+
72705+ if (unlikely(!file || !(prot & PROT_EXEC)))
72706+ return 1;
72707+
72708+ if (is_writable_mmap(file))
72709+ return 0;
72710+
72711+ mode =
72712+ gr_search_file(file->f_path.dentry,
72713+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72714+ file->f_path.mnt);
72715+
72716+ if (!gr_tpe_allow(file))
72717+ return 0;
72718+
72719+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72720+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72721+ return 0;
72722+ } else if (unlikely(!(mode & GR_EXEC))) {
72723+ return 0;
72724+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72725+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72726+ return 1;
72727+ }
72728+
72729+ return 1;
72730+}
72731+
72732+void
72733+gr_acl_handle_psacct(struct task_struct *task, const long code)
72734+{
72735+ unsigned long runtime, cputime;
72736+ cputime_t utime, stime;
72737+ unsigned int wday, cday;
72738+ __u8 whr, chr;
72739+ __u8 wmin, cmin;
72740+ __u8 wsec, csec;
72741+ struct timespec curtime, starttime;
72742+
72743+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72744+ !(task->acl->mode & GR_PROCACCT)))
72745+ return;
72746+
72747+ curtime = ns_to_timespec(ktime_get_ns());
72748+ starttime = ns_to_timespec(task->start_time);
72749+ runtime = curtime.tv_sec - starttime.tv_sec;
72750+ wday = runtime / (60 * 60 * 24);
72751+ runtime -= wday * (60 * 60 * 24);
72752+ whr = runtime / (60 * 60);
72753+ runtime -= whr * (60 * 60);
72754+ wmin = runtime / 60;
72755+ runtime -= wmin * 60;
72756+ wsec = runtime;
72757+
72758+ task_cputime(task, &utime, &stime);
72759+ cputime = cputime_to_secs(utime + stime);
72760+ cday = cputime / (60 * 60 * 24);
72761+ cputime -= cday * (60 * 60 * 24);
72762+ chr = cputime / (60 * 60);
72763+ cputime -= chr * (60 * 60);
72764+ cmin = cputime / 60;
72765+ cputime -= cmin * 60;
72766+ csec = cputime;
72767+
72768+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72769+
72770+ return;
72771+}
72772+
72773+#ifdef CONFIG_TASKSTATS
72774+int gr_is_taskstats_denied(int pid)
72775+{
72776+ struct task_struct *task;
72777+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72778+ const struct cred *cred;
72779+#endif
72780+ int ret = 0;
72781+
72782+ /* restrict taskstats viewing to un-chrooted root users
72783+ who have the 'view' subject flag if the RBAC system is enabled
72784+ */
72785+
72786+ rcu_read_lock();
72787+ read_lock(&tasklist_lock);
72788+ task = find_task_by_vpid(pid);
72789+ if (task) {
72790+#ifdef CONFIG_GRKERNSEC_CHROOT
72791+ if (proc_is_chrooted(task))
72792+ ret = -EACCES;
72793+#endif
72794+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72795+ cred = __task_cred(task);
72796+#ifdef CONFIG_GRKERNSEC_PROC_USER
72797+ if (gr_is_global_nonroot(cred->uid))
72798+ ret = -EACCES;
72799+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72800+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72801+ ret = -EACCES;
72802+#endif
72803+#endif
72804+ if (gr_status & GR_READY) {
72805+ if (!(task->acl->mode & GR_VIEW))
72806+ ret = -EACCES;
72807+ }
72808+ } else
72809+ ret = -ENOENT;
72810+
72811+ read_unlock(&tasklist_lock);
72812+ rcu_read_unlock();
72813+
72814+ return ret;
72815+}
72816+#endif
72817+
72818+/* AUXV entries are filled via a descendant of search_binary_handler
72819+ after we've already applied the subject for the target
72820+*/
72821+int gr_acl_enable_at_secure(void)
72822+{
72823+ if (unlikely(!(gr_status & GR_READY)))
72824+ return 0;
72825+
72826+ if (current->acl->mode & GR_ATSECURE)
72827+ return 1;
72828+
72829+ return 0;
72830+}
72831+
72832+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
72833+{
72834+ struct task_struct *task = current;
72835+ struct dentry *dentry = file->f_path.dentry;
72836+ struct vfsmount *mnt = file->f_path.mnt;
72837+ struct acl_object_label *obj, *tmp;
72838+ struct acl_subject_label *subj;
72839+ unsigned int bufsize;
72840+ int is_not_root;
72841+ char *path;
72842+ dev_t dev = __get_dev(dentry);
72843+
72844+ if (unlikely(!(gr_status & GR_READY)))
72845+ return 1;
72846+
72847+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72848+ return 1;
72849+
72850+ /* ignore Eric Biederman */
72851+ if (IS_PRIVATE(dentry->d_inode))
72852+ return 1;
72853+
72854+ subj = task->acl;
72855+ read_lock(&gr_inode_lock);
72856+ do {
72857+ obj = lookup_acl_obj_label(ino, dev, subj);
72858+ if (obj != NULL) {
72859+ read_unlock(&gr_inode_lock);
72860+ return (obj->mode & GR_FIND) ? 1 : 0;
72861+ }
72862+ } while ((subj = subj->parent_subject));
72863+ read_unlock(&gr_inode_lock);
72864+
72865+ /* this is purely an optimization since we're looking for an object
72866+ for the directory we're doing a readdir on
72867+ if it's possible for any globbed object to match the entry we're
72868+ filling into the directory, then the object we find here will be
72869+ an anchor point with attached globbed objects
72870+ */
72871+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72872+ if (obj->globbed == NULL)
72873+ return (obj->mode & GR_FIND) ? 1 : 0;
72874+
72875+ is_not_root = ((obj->filename[0] == '/') &&
72876+ (obj->filename[1] == '\0')) ? 0 : 1;
72877+ bufsize = PAGE_SIZE - namelen - is_not_root;
72878+
72879+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72880+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72881+ return 1;
72882+
72883+ preempt_disable();
72884+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72885+ bufsize);
72886+
72887+ bufsize = strlen(path);
72888+
72889+ /* if base is "/", don't append an additional slash */
72890+ if (is_not_root)
72891+ *(path + bufsize) = '/';
72892+ memcpy(path + bufsize + is_not_root, name, namelen);
72893+ *(path + bufsize + namelen + is_not_root) = '\0';
72894+
72895+ tmp = obj->globbed;
72896+ while (tmp) {
72897+ if (!glob_match(tmp->filename, path)) {
72898+ preempt_enable();
72899+ return (tmp->mode & GR_FIND) ? 1 : 0;
72900+ }
72901+ tmp = tmp->next;
72902+ }
72903+ preempt_enable();
72904+ return (obj->mode & GR_FIND) ? 1 : 0;
72905+}
72906+
72907+void gr_put_exec_file(struct task_struct *task)
72908+{
72909+ struct file *filp;
72910+
72911+ write_lock(&grsec_exec_file_lock);
72912+ filp = task->exec_file;
72913+ task->exec_file = NULL;
72914+ write_unlock(&grsec_exec_file_lock);
72915+
72916+ if (filp)
72917+ fput(filp);
72918+
72919+ return;
72920+}
72921+
72922+
72923+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72924+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72925+#endif
72926+#ifdef CONFIG_SECURITY
72927+EXPORT_SYMBOL_GPL(gr_check_user_change);
72928+EXPORT_SYMBOL_GPL(gr_check_group_change);
72929+#endif
72930+
72931diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72932new file mode 100644
72933index 0000000..18ffbbd
72934--- /dev/null
72935+++ b/grsecurity/gracl_alloc.c
72936@@ -0,0 +1,105 @@
72937+#include <linux/kernel.h>
72938+#include <linux/mm.h>
72939+#include <linux/slab.h>
72940+#include <linux/vmalloc.h>
72941+#include <linux/gracl.h>
72942+#include <linux/grsecurity.h>
72943+
72944+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72945+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72946+
72947+static __inline__ int
72948+alloc_pop(void)
72949+{
72950+ if (current_alloc_state->alloc_stack_next == 1)
72951+ return 0;
72952+
72953+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72954+
72955+ current_alloc_state->alloc_stack_next--;
72956+
72957+ return 1;
72958+}
72959+
72960+static __inline__ int
72961+alloc_push(void *buf)
72962+{
72963+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72964+ return 1;
72965+
72966+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72967+
72968+ current_alloc_state->alloc_stack_next++;
72969+
72970+ return 0;
72971+}
72972+
72973+void *
72974+acl_alloc(unsigned long len)
72975+{
72976+ void *ret = NULL;
72977+
72978+ if (!len || len > PAGE_SIZE)
72979+ goto out;
72980+
72981+ ret = kmalloc(len, GFP_KERNEL);
72982+
72983+ if (ret) {
72984+ if (alloc_push(ret)) {
72985+ kfree(ret);
72986+ ret = NULL;
72987+ }
72988+ }
72989+
72990+out:
72991+ return ret;
72992+}
72993+
72994+void *
72995+acl_alloc_num(unsigned long num, unsigned long len)
72996+{
72997+ if (!len || (num > (PAGE_SIZE / len)))
72998+ return NULL;
72999+
73000+ return acl_alloc(num * len);
73001+}
73002+
73003+void
73004+acl_free_all(void)
73005+{
73006+ if (!current_alloc_state->alloc_stack)
73007+ return;
73008+
73009+ while (alloc_pop()) ;
73010+
73011+ if (current_alloc_state->alloc_stack) {
73012+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
73013+ kfree(current_alloc_state->alloc_stack);
73014+ else
73015+ vfree(current_alloc_state->alloc_stack);
73016+ }
73017+
73018+ current_alloc_state->alloc_stack = NULL;
73019+ current_alloc_state->alloc_stack_size = 1;
73020+ current_alloc_state->alloc_stack_next = 1;
73021+
73022+ return;
73023+}
73024+
73025+int
73026+acl_alloc_stack_init(unsigned long size)
73027+{
73028+ if ((size * sizeof (void *)) <= PAGE_SIZE)
73029+ current_alloc_state->alloc_stack =
73030+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
73031+ else
73032+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
73033+
73034+ current_alloc_state->alloc_stack_size = size;
73035+ current_alloc_state->alloc_stack_next = 1;
73036+
73037+ if (!current_alloc_state->alloc_stack)
73038+ return 0;
73039+ else
73040+ return 1;
73041+}
73042diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
73043new file mode 100644
73044index 0000000..1a94c11
73045--- /dev/null
73046+++ b/grsecurity/gracl_cap.c
73047@@ -0,0 +1,127 @@
73048+#include <linux/kernel.h>
73049+#include <linux/module.h>
73050+#include <linux/sched.h>
73051+#include <linux/gracl.h>
73052+#include <linux/grsecurity.h>
73053+#include <linux/grinternal.h>
73054+
73055+extern const char *captab_log[];
73056+extern int captab_log_entries;
73057+
73058+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
73059+{
73060+ struct acl_subject_label *curracl;
73061+
73062+ if (!gr_acl_is_enabled())
73063+ return 1;
73064+
73065+ curracl = task->acl;
73066+
73067+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
73068+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
73069+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
73070+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
73071+ gr_to_filename(task->exec_file->f_path.dentry,
73072+ task->exec_file->f_path.mnt) : curracl->filename,
73073+ curracl->filename, 0UL,
73074+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
73075+ return 1;
73076+ }
73077+
73078+ return 0;
73079+}
73080+
73081+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73082+{
73083+ struct acl_subject_label *curracl;
73084+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73085+ kernel_cap_t cap_audit = __cap_empty_set;
73086+
73087+ if (!gr_acl_is_enabled())
73088+ return 1;
73089+
73090+ curracl = task->acl;
73091+
73092+ cap_drop = curracl->cap_lower;
73093+ cap_mask = curracl->cap_mask;
73094+ cap_audit = curracl->cap_invert_audit;
73095+
73096+ while ((curracl = curracl->parent_subject)) {
73097+ /* if the cap isn't specified in the current computed mask but is specified in the
73098+ current level subject, and is lowered in the current level subject, then add
73099+ it to the set of dropped capabilities
73100+ otherwise, add the current level subject's mask to the current computed mask
73101+ */
73102+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73103+ cap_raise(cap_mask, cap);
73104+ if (cap_raised(curracl->cap_lower, cap))
73105+ cap_raise(cap_drop, cap);
73106+ if (cap_raised(curracl->cap_invert_audit, cap))
73107+ cap_raise(cap_audit, cap);
73108+ }
73109+ }
73110+
73111+ if (!cap_raised(cap_drop, cap)) {
73112+ if (cap_raised(cap_audit, cap))
73113+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
73114+ return 1;
73115+ }
73116+
73117+ /* only learn the capability use if the process has the capability in the
73118+ general case, the two uses in sys.c of gr_learn_cap are an exception
73119+ to this rule to ensure any role transition involves what the full-learned
73120+ policy believes in a privileged process
73121+ */
73122+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
73123+ return 1;
73124+
73125+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
73126+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
73127+
73128+ return 0;
73129+}
73130+
73131+int
73132+gr_acl_is_capable(const int cap)
73133+{
73134+ return gr_task_acl_is_capable(current, current_cred(), cap);
73135+}
73136+
73137+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
73138+{
73139+ struct acl_subject_label *curracl;
73140+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
73141+
73142+ if (!gr_acl_is_enabled())
73143+ return 1;
73144+
73145+ curracl = task->acl;
73146+
73147+ cap_drop = curracl->cap_lower;
73148+ cap_mask = curracl->cap_mask;
73149+
73150+ while ((curracl = curracl->parent_subject)) {
73151+ /* if the cap isn't specified in the current computed mask but is specified in the
73152+ current level subject, and is lowered in the current level subject, then add
73153+ it to the set of dropped capabilities
73154+ otherwise, add the current level subject's mask to the current computed mask
73155+ */
73156+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
73157+ cap_raise(cap_mask, cap);
73158+ if (cap_raised(curracl->cap_lower, cap))
73159+ cap_raise(cap_drop, cap);
73160+ }
73161+ }
73162+
73163+ if (!cap_raised(cap_drop, cap))
73164+ return 1;
73165+
73166+ return 0;
73167+}
73168+
73169+int
73170+gr_acl_is_capable_nolog(const int cap)
73171+{
73172+ return gr_task_acl_is_capable_nolog(current, cap);
73173+}
73174+
73175diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
73176new file mode 100644
73177index 0000000..ca25605
73178--- /dev/null
73179+++ b/grsecurity/gracl_compat.c
73180@@ -0,0 +1,270 @@
73181+#include <linux/kernel.h>
73182+#include <linux/gracl.h>
73183+#include <linux/compat.h>
73184+#include <linux/gracl_compat.h>
73185+
73186+#include <asm/uaccess.h>
73187+
73188+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
73189+{
73190+ struct gr_arg_wrapper_compat uwrapcompat;
73191+
73192+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
73193+ return -EFAULT;
73194+
73195+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
73196+ (uwrapcompat.version != 0x2901)) ||
73197+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
73198+ return -EINVAL;
73199+
73200+ uwrap->arg = compat_ptr(uwrapcompat.arg);
73201+ uwrap->version = uwrapcompat.version;
73202+ uwrap->size = sizeof(struct gr_arg);
73203+
73204+ return 0;
73205+}
73206+
73207+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
73208+{
73209+ struct gr_arg_compat argcompat;
73210+
73211+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
73212+ return -EFAULT;
73213+
73214+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
73215+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
73216+ arg->role_db.num_roles = argcompat.role_db.num_roles;
73217+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
73218+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
73219+ arg->role_db.num_objects = argcompat.role_db.num_objects;
73220+
73221+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
73222+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
73223+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
73224+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
73225+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
73226+ arg->segv_device = argcompat.segv_device;
73227+ arg->segv_inode = argcompat.segv_inode;
73228+ arg->segv_uid = argcompat.segv_uid;
73229+ arg->num_sprole_pws = argcompat.num_sprole_pws;
73230+ arg->mode = argcompat.mode;
73231+
73232+ return 0;
73233+}
73234+
73235+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
73236+{
73237+ struct acl_object_label_compat objcompat;
73238+
73239+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
73240+ return -EFAULT;
73241+
73242+ obj->filename = compat_ptr(objcompat.filename);
73243+ obj->inode = objcompat.inode;
73244+ obj->device = objcompat.device;
73245+ obj->mode = objcompat.mode;
73246+
73247+ obj->nested = compat_ptr(objcompat.nested);
73248+ obj->globbed = compat_ptr(objcompat.globbed);
73249+
73250+ obj->prev = compat_ptr(objcompat.prev);
73251+ obj->next = compat_ptr(objcompat.next);
73252+
73253+ return 0;
73254+}
73255+
73256+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73257+{
73258+ unsigned int i;
73259+ struct acl_subject_label_compat subjcompat;
73260+
73261+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
73262+ return -EFAULT;
73263+
73264+ subj->filename = compat_ptr(subjcompat.filename);
73265+ subj->inode = subjcompat.inode;
73266+ subj->device = subjcompat.device;
73267+ subj->mode = subjcompat.mode;
73268+ subj->cap_mask = subjcompat.cap_mask;
73269+ subj->cap_lower = subjcompat.cap_lower;
73270+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
73271+
73272+ for (i = 0; i < GR_NLIMITS; i++) {
73273+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
73274+ subj->res[i].rlim_cur = RLIM_INFINITY;
73275+ else
73276+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
73277+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
73278+ subj->res[i].rlim_max = RLIM_INFINITY;
73279+ else
73280+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
73281+ }
73282+ subj->resmask = subjcompat.resmask;
73283+
73284+ subj->user_trans_type = subjcompat.user_trans_type;
73285+ subj->group_trans_type = subjcompat.group_trans_type;
73286+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
73287+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
73288+ subj->user_trans_num = subjcompat.user_trans_num;
73289+ subj->group_trans_num = subjcompat.group_trans_num;
73290+
73291+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
73292+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
73293+ subj->ip_type = subjcompat.ip_type;
73294+ subj->ips = compat_ptr(subjcompat.ips);
73295+ subj->ip_num = subjcompat.ip_num;
73296+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
73297+
73298+ subj->crashes = subjcompat.crashes;
73299+ subj->expires = subjcompat.expires;
73300+
73301+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
73302+ subj->hash = compat_ptr(subjcompat.hash);
73303+ subj->prev = compat_ptr(subjcompat.prev);
73304+ subj->next = compat_ptr(subjcompat.next);
73305+
73306+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
73307+ subj->obj_hash_size = subjcompat.obj_hash_size;
73308+ subj->pax_flags = subjcompat.pax_flags;
73309+
73310+ return 0;
73311+}
73312+
73313+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
73314+{
73315+ struct acl_role_label_compat rolecompat;
73316+
73317+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
73318+ return -EFAULT;
73319+
73320+ role->rolename = compat_ptr(rolecompat.rolename);
73321+ role->uidgid = rolecompat.uidgid;
73322+ role->roletype = rolecompat.roletype;
73323+
73324+ role->auth_attempts = rolecompat.auth_attempts;
73325+ role->expires = rolecompat.expires;
73326+
73327+ role->root_label = compat_ptr(rolecompat.root_label);
73328+ role->hash = compat_ptr(rolecompat.hash);
73329+
73330+ role->prev = compat_ptr(rolecompat.prev);
73331+ role->next = compat_ptr(rolecompat.next);
73332+
73333+ role->transitions = compat_ptr(rolecompat.transitions);
73334+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
73335+ role->domain_children = compat_ptr(rolecompat.domain_children);
73336+ role->domain_child_num = rolecompat.domain_child_num;
73337+
73338+ role->umask = rolecompat.umask;
73339+
73340+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
73341+ role->subj_hash_size = rolecompat.subj_hash_size;
73342+
73343+ return 0;
73344+}
73345+
73346+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73347+{
73348+ struct role_allowed_ip_compat roleip_compat;
73349+
73350+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
73351+ return -EFAULT;
73352+
73353+ roleip->addr = roleip_compat.addr;
73354+ roleip->netmask = roleip_compat.netmask;
73355+
73356+ roleip->prev = compat_ptr(roleip_compat.prev);
73357+ roleip->next = compat_ptr(roleip_compat.next);
73358+
73359+ return 0;
73360+}
73361+
73362+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
73363+{
73364+ struct role_transition_compat trans_compat;
73365+
73366+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
73367+ return -EFAULT;
73368+
73369+ trans->rolename = compat_ptr(trans_compat.rolename);
73370+
73371+ trans->prev = compat_ptr(trans_compat.prev);
73372+ trans->next = compat_ptr(trans_compat.next);
73373+
73374+ return 0;
73375+
73376+}
73377+
73378+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73379+{
73380+ struct gr_hash_struct_compat hash_compat;
73381+
73382+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
73383+ return -EFAULT;
73384+
73385+ hash->table = compat_ptr(hash_compat.table);
73386+ hash->nametable = compat_ptr(hash_compat.nametable);
73387+ hash->first = compat_ptr(hash_compat.first);
73388+
73389+ hash->table_size = hash_compat.table_size;
73390+ hash->used_size = hash_compat.used_size;
73391+
73392+ hash->type = hash_compat.type;
73393+
73394+ return 0;
73395+}
73396+
73397+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
73398+{
73399+ compat_uptr_t ptrcompat;
73400+
73401+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
73402+ return -EFAULT;
73403+
73404+ *(void **)ptr = compat_ptr(ptrcompat);
73405+
73406+ return 0;
73407+}
73408+
73409+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73410+{
73411+ struct acl_ip_label_compat ip_compat;
73412+
73413+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
73414+ return -EFAULT;
73415+
73416+ ip->iface = compat_ptr(ip_compat.iface);
73417+ ip->addr = ip_compat.addr;
73418+ ip->netmask = ip_compat.netmask;
73419+ ip->low = ip_compat.low;
73420+ ip->high = ip_compat.high;
73421+ ip->mode = ip_compat.mode;
73422+ ip->type = ip_compat.type;
73423+
73424+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
73425+
73426+ ip->prev = compat_ptr(ip_compat.prev);
73427+ ip->next = compat_ptr(ip_compat.next);
73428+
73429+ return 0;
73430+}
73431+
73432+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73433+{
73434+ struct sprole_pw_compat pw_compat;
73435+
73436+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
73437+ return -EFAULT;
73438+
73439+ pw->rolename = compat_ptr(pw_compat.rolename);
73440+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
73441+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
73442+
73443+ return 0;
73444+}
73445+
73446+size_t get_gr_arg_wrapper_size_compat(void)
73447+{
73448+ return sizeof(struct gr_arg_wrapper_compat);
73449+}
73450+
73451diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
73452new file mode 100644
73453index 0000000..4008fdc
73454--- /dev/null
73455+++ b/grsecurity/gracl_fs.c
73456@@ -0,0 +1,445 @@
73457+#include <linux/kernel.h>
73458+#include <linux/sched.h>
73459+#include <linux/types.h>
73460+#include <linux/fs.h>
73461+#include <linux/file.h>
73462+#include <linux/stat.h>
73463+#include <linux/grsecurity.h>
73464+#include <linux/grinternal.h>
73465+#include <linux/gracl.h>
73466+
73467+umode_t
73468+gr_acl_umask(void)
73469+{
73470+ if (unlikely(!gr_acl_is_enabled()))
73471+ return 0;
73472+
73473+ return current->role->umask;
73474+}
73475+
73476+__u32
73477+gr_acl_handle_hidden_file(const struct dentry * dentry,
73478+ const struct vfsmount * mnt)
73479+{
73480+ __u32 mode;
73481+
73482+ if (unlikely(d_is_negative(dentry)))
73483+ return GR_FIND;
73484+
73485+ mode =
73486+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
73487+
73488+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
73489+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73490+ return mode;
73491+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
73492+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
73493+ return 0;
73494+ } else if (unlikely(!(mode & GR_FIND)))
73495+ return 0;
73496+
73497+ return GR_FIND;
73498+}
73499+
73500+__u32
73501+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73502+ int acc_mode)
73503+{
73504+ __u32 reqmode = GR_FIND;
73505+ __u32 mode;
73506+
73507+ if (unlikely(d_is_negative(dentry)))
73508+ return reqmode;
73509+
73510+ if (acc_mode & MAY_APPEND)
73511+ reqmode |= GR_APPEND;
73512+ else if (acc_mode & MAY_WRITE)
73513+ reqmode |= GR_WRITE;
73514+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
73515+ reqmode |= GR_READ;
73516+
73517+ mode =
73518+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73519+ mnt);
73520+
73521+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73522+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73523+ reqmode & GR_READ ? " reading" : "",
73524+ reqmode & GR_WRITE ? " writing" : reqmode &
73525+ GR_APPEND ? " appending" : "");
73526+ return reqmode;
73527+ } else
73528+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73529+ {
73530+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
73531+ reqmode & GR_READ ? " reading" : "",
73532+ reqmode & GR_WRITE ? " writing" : reqmode &
73533+ GR_APPEND ? " appending" : "");
73534+ return 0;
73535+ } else if (unlikely((mode & reqmode) != reqmode))
73536+ return 0;
73537+
73538+ return reqmode;
73539+}
73540+
73541+__u32
73542+gr_acl_handle_creat(const struct dentry * dentry,
73543+ const struct dentry * p_dentry,
73544+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73545+ const int imode)
73546+{
73547+ __u32 reqmode = GR_WRITE | GR_CREATE;
73548+ __u32 mode;
73549+
73550+ if (acc_mode & MAY_APPEND)
73551+ reqmode |= GR_APPEND;
73552+ // if a directory was required or the directory already exists, then
73553+ // don't count this open as a read
73554+ if ((acc_mode & MAY_READ) &&
73555+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
73556+ reqmode |= GR_READ;
73557+ if ((open_flags & O_CREAT) &&
73558+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73559+ reqmode |= GR_SETID;
73560+
73561+ mode =
73562+ gr_check_create(dentry, p_dentry, p_mnt,
73563+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73564+
73565+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73566+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73567+ reqmode & GR_READ ? " reading" : "",
73568+ reqmode & GR_WRITE ? " writing" : reqmode &
73569+ GR_APPEND ? " appending" : "");
73570+ return reqmode;
73571+ } else
73572+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73573+ {
73574+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
73575+ reqmode & GR_READ ? " reading" : "",
73576+ reqmode & GR_WRITE ? " writing" : reqmode &
73577+ GR_APPEND ? " appending" : "");
73578+ return 0;
73579+ } else if (unlikely((mode & reqmode) != reqmode))
73580+ return 0;
73581+
73582+ return reqmode;
73583+}
73584+
73585+__u32
73586+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
73587+ const int fmode)
73588+{
73589+ __u32 mode, reqmode = GR_FIND;
73590+
73591+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
73592+ reqmode |= GR_EXEC;
73593+ if (fmode & S_IWOTH)
73594+ reqmode |= GR_WRITE;
73595+ if (fmode & S_IROTH)
73596+ reqmode |= GR_READ;
73597+
73598+ mode =
73599+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
73600+ mnt);
73601+
73602+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
73603+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73604+ reqmode & GR_READ ? " reading" : "",
73605+ reqmode & GR_WRITE ? " writing" : "",
73606+ reqmode & GR_EXEC ? " executing" : "");
73607+ return reqmode;
73608+ } else
73609+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
73610+ {
73611+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
73612+ reqmode & GR_READ ? " reading" : "",
73613+ reqmode & GR_WRITE ? " writing" : "",
73614+ reqmode & GR_EXEC ? " executing" : "");
73615+ return 0;
73616+ } else if (unlikely((mode & reqmode) != reqmode))
73617+ return 0;
73618+
73619+ return reqmode;
73620+}
73621+
73622+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
73623+{
73624+ __u32 mode;
73625+
73626+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
73627+
73628+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73629+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
73630+ return mode;
73631+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73632+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
73633+ return 0;
73634+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73635+ return 0;
73636+
73637+ return (reqmode);
73638+}
73639+
73640+__u32
73641+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73642+{
73643+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
73644+}
73645+
73646+__u32
73647+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
73648+{
73649+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
73650+}
73651+
73652+__u32
73653+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
73654+{
73655+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
73656+}
73657+
73658+__u32
73659+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
73660+{
73661+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
73662+}
73663+
73664+__u32
73665+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
73666+ umode_t *modeptr)
73667+{
73668+ umode_t mode;
73669+
73670+ *modeptr &= ~gr_acl_umask();
73671+ mode = *modeptr;
73672+
73673+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
73674+ return 1;
73675+
73676+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
73677+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
73678+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
73679+ GR_CHMOD_ACL_MSG);
73680+ } else {
73681+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
73682+ }
73683+}
73684+
73685+__u32
73686+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
73687+{
73688+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
73689+}
73690+
73691+__u32
73692+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73693+{
73694+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73695+}
73696+
73697+__u32
73698+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73699+{
73700+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73701+}
73702+
73703+__u32
73704+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73705+{
73706+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73707+}
73708+
73709+__u32
73710+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73711+{
73712+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73713+ GR_UNIXCONNECT_ACL_MSG);
73714+}
73715+
73716+/* hardlinks require at minimum create and link permission,
73717+ any additional privilege required is based on the
73718+ privilege of the file being linked to
73719+*/
73720+__u32
73721+gr_acl_handle_link(const struct dentry * new_dentry,
73722+ const struct dentry * parent_dentry,
73723+ const struct vfsmount * parent_mnt,
73724+ const struct dentry * old_dentry,
73725+ const struct vfsmount * old_mnt, const struct filename *to)
73726+{
73727+ __u32 mode;
73728+ __u32 needmode = GR_CREATE | GR_LINK;
73729+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73730+
73731+ mode =
73732+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73733+ old_mnt);
73734+
73735+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73736+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73737+ return mode;
73738+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73739+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73740+ return 0;
73741+ } else if (unlikely((mode & needmode) != needmode))
73742+ return 0;
73743+
73744+ return 1;
73745+}
73746+
73747+__u32
73748+gr_acl_handle_symlink(const struct dentry * new_dentry,
73749+ const struct dentry * parent_dentry,
73750+ const struct vfsmount * parent_mnt, const struct filename *from)
73751+{
73752+ __u32 needmode = GR_WRITE | GR_CREATE;
73753+ __u32 mode;
73754+
73755+ mode =
73756+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73757+ GR_CREATE | GR_AUDIT_CREATE |
73758+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73759+
73760+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73761+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73762+ return mode;
73763+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73764+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73765+ return 0;
73766+ } else if (unlikely((mode & needmode) != needmode))
73767+ return 0;
73768+
73769+ return (GR_WRITE | GR_CREATE);
73770+}
73771+
73772+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73773+{
73774+ __u32 mode;
73775+
73776+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73777+
73778+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73779+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73780+ return mode;
73781+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73782+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73783+ return 0;
73784+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73785+ return 0;
73786+
73787+ return (reqmode);
73788+}
73789+
73790+__u32
73791+gr_acl_handle_mknod(const struct dentry * new_dentry,
73792+ const struct dentry * parent_dentry,
73793+ const struct vfsmount * parent_mnt,
73794+ const int mode)
73795+{
73796+ __u32 reqmode = GR_WRITE | GR_CREATE;
73797+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73798+ reqmode |= GR_SETID;
73799+
73800+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73801+ reqmode, GR_MKNOD_ACL_MSG);
73802+}
73803+
73804+__u32
73805+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73806+ const struct dentry *parent_dentry,
73807+ const struct vfsmount *parent_mnt)
73808+{
73809+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73810+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73811+}
73812+
73813+#define RENAME_CHECK_SUCCESS(old, new) \
73814+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73815+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73816+
73817+int
73818+gr_acl_handle_rename(struct dentry *new_dentry,
73819+ struct dentry *parent_dentry,
73820+ const struct vfsmount *parent_mnt,
73821+ struct dentry *old_dentry,
73822+ struct inode *old_parent_inode,
73823+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73824+{
73825+ __u32 comp1, comp2;
73826+ int error = 0;
73827+
73828+ if (unlikely(!gr_acl_is_enabled()))
73829+ return 0;
73830+
73831+ if (flags & RENAME_EXCHANGE) {
73832+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73833+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73834+ GR_SUPPRESS, parent_mnt);
73835+ comp2 =
73836+ gr_search_file(old_dentry,
73837+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73838+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73839+ } else if (d_is_negative(new_dentry)) {
73840+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73841+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73842+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73843+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73844+ GR_DELETE | GR_AUDIT_DELETE |
73845+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73846+ GR_SUPPRESS, old_mnt);
73847+ } else {
73848+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73849+ GR_CREATE | GR_DELETE |
73850+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73851+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73852+ GR_SUPPRESS, parent_mnt);
73853+ comp2 =
73854+ gr_search_file(old_dentry,
73855+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73856+ GR_DELETE | GR_AUDIT_DELETE |
73857+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73858+ }
73859+
73860+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73861+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73862+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73863+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73864+ && !(comp2 & GR_SUPPRESS)) {
73865+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73866+ error = -EACCES;
73867+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73868+ error = -EACCES;
73869+
73870+ return error;
73871+}
73872+
73873+void
73874+gr_acl_handle_exit(void)
73875+{
73876+ u16 id;
73877+ char *rolename;
73878+
73879+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73880+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73881+ id = current->acl_role_id;
73882+ rolename = current->role->rolename;
73883+ gr_set_acls(1);
73884+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73885+ }
73886+
73887+ gr_put_exec_file(current);
73888+ return;
73889+}
73890+
73891+int
73892+gr_acl_handle_procpidmem(const struct task_struct *task)
73893+{
73894+ if (unlikely(!gr_acl_is_enabled()))
73895+ return 0;
73896+
73897+ if (task != current && task->acl->mode & GR_PROTPROCFD)
73898+ return -EACCES;
73899+
73900+ return 0;
73901+}
73902diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73903new file mode 100644
73904index 0000000..f056b81
73905--- /dev/null
73906+++ b/grsecurity/gracl_ip.c
73907@@ -0,0 +1,386 @@
73908+#include <linux/kernel.h>
73909+#include <asm/uaccess.h>
73910+#include <asm/errno.h>
73911+#include <net/sock.h>
73912+#include <linux/file.h>
73913+#include <linux/fs.h>
73914+#include <linux/net.h>
73915+#include <linux/in.h>
73916+#include <linux/skbuff.h>
73917+#include <linux/ip.h>
73918+#include <linux/udp.h>
73919+#include <linux/types.h>
73920+#include <linux/sched.h>
73921+#include <linux/netdevice.h>
73922+#include <linux/inetdevice.h>
73923+#include <linux/gracl.h>
73924+#include <linux/grsecurity.h>
73925+#include <linux/grinternal.h>
73926+
73927+#define GR_BIND 0x01
73928+#define GR_CONNECT 0x02
73929+#define GR_INVERT 0x04
73930+#define GR_BINDOVERRIDE 0x08
73931+#define GR_CONNECTOVERRIDE 0x10
73932+#define GR_SOCK_FAMILY 0x20
73933+
73934+static const char * gr_protocols[IPPROTO_MAX] = {
73935+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73936+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73937+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73938+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73939+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73940+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73941+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73942+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73943+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73944+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73945+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73946+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73947+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73948+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73949+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73950+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73951+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
73952+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73953+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73954+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73955+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73956+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73957+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73958+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73959+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73960+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73961+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73962+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73963+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73964+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73965+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73966+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73967+ };
73968+
73969+static const char * gr_socktypes[SOCK_MAX] = {
73970+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73971+ "unknown:7", "unknown:8", "unknown:9", "packet"
73972+ };
73973+
73974+static const char * gr_sockfamilies[AF_MAX+1] = {
73975+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73976+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73977+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
73978+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
73979+ };
73980+
73981+const char *
73982+gr_proto_to_name(unsigned char proto)
73983+{
73984+ return gr_protocols[proto];
73985+}
73986+
73987+const char *
73988+gr_socktype_to_name(unsigned char type)
73989+{
73990+ return gr_socktypes[type];
73991+}
73992+
73993+const char *
73994+gr_sockfamily_to_name(unsigned char family)
73995+{
73996+ return gr_sockfamilies[family];
73997+}
73998+
73999+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
74000+
74001+int
74002+gr_search_socket(const int domain, const int type, const int protocol)
74003+{
74004+ struct acl_subject_label *curr;
74005+ const struct cred *cred = current_cred();
74006+
74007+ if (unlikely(!gr_acl_is_enabled()))
74008+ goto exit;
74009+
74010+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
74011+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
74012+ goto exit; // let the kernel handle it
74013+
74014+ curr = current->acl;
74015+
74016+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
74017+ /* the family is allowed, if this is PF_INET allow it only if
74018+ the extra sock type/protocol checks pass */
74019+ if (domain == PF_INET)
74020+ goto inet_check;
74021+ goto exit;
74022+ } else {
74023+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74024+ __u32 fakeip = 0;
74025+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74026+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74027+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74028+ gr_to_filename(current->exec_file->f_path.dentry,
74029+ current->exec_file->f_path.mnt) :
74030+ curr->filename, curr->filename,
74031+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
74032+ &current->signal->saved_ip);
74033+ goto exit;
74034+ }
74035+ goto exit_fail;
74036+ }
74037+
74038+inet_check:
74039+ /* the rest of this checking is for IPv4 only */
74040+ if (!curr->ips)
74041+ goto exit;
74042+
74043+ if ((curr->ip_type & (1U << type)) &&
74044+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
74045+ goto exit;
74046+
74047+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74048+ /* we don't place acls on raw sockets , and sometimes
74049+ dgram/ip sockets are opened for ioctl and not
74050+ bind/connect, so we'll fake a bind learn log */
74051+ if (type == SOCK_RAW || type == SOCK_PACKET) {
74052+ __u32 fakeip = 0;
74053+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74054+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74055+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74056+ gr_to_filename(current->exec_file->f_path.dentry,
74057+ current->exec_file->f_path.mnt) :
74058+ curr->filename, curr->filename,
74059+ &fakeip, 0, type,
74060+ protocol, GR_CONNECT, &current->signal->saved_ip);
74061+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
74062+ __u32 fakeip = 0;
74063+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74064+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74065+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74066+ gr_to_filename(current->exec_file->f_path.dentry,
74067+ current->exec_file->f_path.mnt) :
74068+ curr->filename, curr->filename,
74069+ &fakeip, 0, type,
74070+ protocol, GR_BIND, &current->signal->saved_ip);
74071+ }
74072+ /* we'll log when they use connect or bind */
74073+ goto exit;
74074+ }
74075+
74076+exit_fail:
74077+ if (domain == PF_INET)
74078+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
74079+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
74080+ else if (rcu_access_pointer(net_families[domain]) != NULL)
74081+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
74082+ gr_socktype_to_name(type), protocol);
74083+
74084+ return 0;
74085+exit:
74086+ return 1;
74087+}
74088+
74089+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
74090+{
74091+ if ((ip->mode & mode) &&
74092+ (ip_port >= ip->low) &&
74093+ (ip_port <= ip->high) &&
74094+ ((ntohl(ip_addr) & our_netmask) ==
74095+ (ntohl(our_addr) & our_netmask))
74096+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
74097+ && (ip->type & (1U << type))) {
74098+ if (ip->mode & GR_INVERT)
74099+ return 2; // specifically denied
74100+ else
74101+ return 1; // allowed
74102+ }
74103+
74104+ return 0; // not specifically allowed, may continue parsing
74105+}
74106+
74107+static int
74108+gr_search_connectbind(const int full_mode, struct sock *sk,
74109+ struct sockaddr_in *addr, const int type)
74110+{
74111+ char iface[IFNAMSIZ] = {0};
74112+ struct acl_subject_label *curr;
74113+ struct acl_ip_label *ip;
74114+ struct inet_sock *isk;
74115+ struct net_device *dev;
74116+ struct in_device *idev;
74117+ unsigned long i;
74118+ int ret;
74119+ int mode = full_mode & (GR_BIND | GR_CONNECT);
74120+ __u32 ip_addr = 0;
74121+ __u32 our_addr;
74122+ __u32 our_netmask;
74123+ char *p;
74124+ __u16 ip_port = 0;
74125+ const struct cred *cred = current_cred();
74126+
74127+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
74128+ return 0;
74129+
74130+ curr = current->acl;
74131+ isk = inet_sk(sk);
74132+
74133+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
74134+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
74135+ addr->sin_addr.s_addr = curr->inaddr_any_override;
74136+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
74137+ struct sockaddr_in saddr;
74138+ int err;
74139+
74140+ saddr.sin_family = AF_INET;
74141+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
74142+ saddr.sin_port = isk->inet_sport;
74143+
74144+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74145+ if (err)
74146+ return err;
74147+
74148+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
74149+ if (err)
74150+ return err;
74151+ }
74152+
74153+ if (!curr->ips)
74154+ return 0;
74155+
74156+ ip_addr = addr->sin_addr.s_addr;
74157+ ip_port = ntohs(addr->sin_port);
74158+
74159+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
74160+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
74161+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
74162+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
74163+ gr_to_filename(current->exec_file->f_path.dentry,
74164+ current->exec_file->f_path.mnt) :
74165+ curr->filename, curr->filename,
74166+ &ip_addr, ip_port, type,
74167+ sk->sk_protocol, mode, &current->signal->saved_ip);
74168+ return 0;
74169+ }
74170+
74171+ for (i = 0; i < curr->ip_num; i++) {
74172+ ip = *(curr->ips + i);
74173+ if (ip->iface != NULL) {
74174+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
74175+ p = strchr(iface, ':');
74176+ if (p != NULL)
74177+ *p = '\0';
74178+ dev = dev_get_by_name(sock_net(sk), iface);
74179+ if (dev == NULL)
74180+ continue;
74181+ idev = in_dev_get(dev);
74182+ if (idev == NULL) {
74183+ dev_put(dev);
74184+ continue;
74185+ }
74186+ rcu_read_lock();
74187+ for_ifa(idev) {
74188+ if (!strcmp(ip->iface, ifa->ifa_label)) {
74189+ our_addr = ifa->ifa_address;
74190+ our_netmask = 0xffffffff;
74191+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74192+ if (ret == 1) {
74193+ rcu_read_unlock();
74194+ in_dev_put(idev);
74195+ dev_put(dev);
74196+ return 0;
74197+ } else if (ret == 2) {
74198+ rcu_read_unlock();
74199+ in_dev_put(idev);
74200+ dev_put(dev);
74201+ goto denied;
74202+ }
74203+ }
74204+ } endfor_ifa(idev);
74205+ rcu_read_unlock();
74206+ in_dev_put(idev);
74207+ dev_put(dev);
74208+ } else {
74209+ our_addr = ip->addr;
74210+ our_netmask = ip->netmask;
74211+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
74212+ if (ret == 1)
74213+ return 0;
74214+ else if (ret == 2)
74215+ goto denied;
74216+ }
74217+ }
74218+
74219+denied:
74220+ if (mode == GR_BIND)
74221+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74222+ else if (mode == GR_CONNECT)
74223+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
74224+
74225+ return -EACCES;
74226+}
74227+
74228+int
74229+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
74230+{
74231+ /* always allow disconnection of dgram sockets with connect */
74232+ if (addr->sin_family == AF_UNSPEC)
74233+ return 0;
74234+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
74235+}
74236+
74237+int
74238+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
74239+{
74240+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
74241+}
74242+
74243+int gr_search_listen(struct socket *sock)
74244+{
74245+ struct sock *sk = sock->sk;
74246+ struct sockaddr_in addr;
74247+
74248+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74249+ addr.sin_port = inet_sk(sk)->inet_sport;
74250+
74251+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74252+}
74253+
74254+int gr_search_accept(struct socket *sock)
74255+{
74256+ struct sock *sk = sock->sk;
74257+ struct sockaddr_in addr;
74258+
74259+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
74260+ addr.sin_port = inet_sk(sk)->inet_sport;
74261+
74262+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
74263+}
74264+
74265+int
74266+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
74267+{
74268+ if (addr)
74269+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
74270+ else {
74271+ struct sockaddr_in sin;
74272+ const struct inet_sock *inet = inet_sk(sk);
74273+
74274+ sin.sin_addr.s_addr = inet->inet_daddr;
74275+ sin.sin_port = inet->inet_dport;
74276+
74277+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74278+ }
74279+}
74280+
74281+int
74282+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
74283+{
74284+ struct sockaddr_in sin;
74285+
74286+ if (unlikely(skb->len < sizeof (struct udphdr)))
74287+ return 0; // skip this packet
74288+
74289+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
74290+ sin.sin_port = udp_hdr(skb)->source;
74291+
74292+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
74293+}
74294diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
74295new file mode 100644
74296index 0000000..25f54ef
74297--- /dev/null
74298+++ b/grsecurity/gracl_learn.c
74299@@ -0,0 +1,207 @@
74300+#include <linux/kernel.h>
74301+#include <linux/mm.h>
74302+#include <linux/sched.h>
74303+#include <linux/poll.h>
74304+#include <linux/string.h>
74305+#include <linux/file.h>
74306+#include <linux/types.h>
74307+#include <linux/vmalloc.h>
74308+#include <linux/grinternal.h>
74309+
74310+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
74311+ size_t count, loff_t *ppos);
74312+extern int gr_acl_is_enabled(void);
74313+
74314+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
74315+static int gr_learn_attached;
74316+
74317+/* use a 512k buffer */
74318+#define LEARN_BUFFER_SIZE (512 * 1024)
74319+
74320+static DEFINE_SPINLOCK(gr_learn_lock);
74321+static DEFINE_MUTEX(gr_learn_user_mutex);
74322+
74323+/* we need to maintain two buffers, so that the kernel context of grlearn
74324+ uses a semaphore around the userspace copying, and the other kernel contexts
74325+ use a spinlock when copying into the buffer, since they cannot sleep
74326+*/
74327+static char *learn_buffer;
74328+static char *learn_buffer_user;
74329+static int learn_buffer_len;
74330+static int learn_buffer_user_len;
74331+
74332+static ssize_t
74333+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
74334+{
74335+ DECLARE_WAITQUEUE(wait, current);
74336+ ssize_t retval = 0;
74337+
74338+ add_wait_queue(&learn_wait, &wait);
74339+ set_current_state(TASK_INTERRUPTIBLE);
74340+ do {
74341+ mutex_lock(&gr_learn_user_mutex);
74342+ spin_lock(&gr_learn_lock);
74343+ if (learn_buffer_len)
74344+ break;
74345+ spin_unlock(&gr_learn_lock);
74346+ mutex_unlock(&gr_learn_user_mutex);
74347+ if (file->f_flags & O_NONBLOCK) {
74348+ retval = -EAGAIN;
74349+ goto out;
74350+ }
74351+ if (signal_pending(current)) {
74352+ retval = -ERESTARTSYS;
74353+ goto out;
74354+ }
74355+
74356+ schedule();
74357+ } while (1);
74358+
74359+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
74360+ learn_buffer_user_len = learn_buffer_len;
74361+ retval = learn_buffer_len;
74362+ learn_buffer_len = 0;
74363+
74364+ spin_unlock(&gr_learn_lock);
74365+
74366+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
74367+ retval = -EFAULT;
74368+
74369+ mutex_unlock(&gr_learn_user_mutex);
74370+out:
74371+ set_current_state(TASK_RUNNING);
74372+ remove_wait_queue(&learn_wait, &wait);
74373+ return retval;
74374+}
74375+
74376+static unsigned int
74377+poll_learn(struct file * file, poll_table * wait)
74378+{
74379+ poll_wait(file, &learn_wait, wait);
74380+
74381+ if (learn_buffer_len)
74382+ return (POLLIN | POLLRDNORM);
74383+
74384+ return 0;
74385+}
74386+
74387+void
74388+gr_clear_learn_entries(void)
74389+{
74390+ char *tmp;
74391+
74392+ mutex_lock(&gr_learn_user_mutex);
74393+ spin_lock(&gr_learn_lock);
74394+ tmp = learn_buffer;
74395+ learn_buffer = NULL;
74396+ spin_unlock(&gr_learn_lock);
74397+ if (tmp)
74398+ vfree(tmp);
74399+ if (learn_buffer_user != NULL) {
74400+ vfree(learn_buffer_user);
74401+ learn_buffer_user = NULL;
74402+ }
74403+ learn_buffer_len = 0;
74404+ mutex_unlock(&gr_learn_user_mutex);
74405+
74406+ return;
74407+}
74408+
74409+void
74410+gr_add_learn_entry(const char *fmt, ...)
74411+{
74412+ va_list args;
74413+ unsigned int len;
74414+
74415+ if (!gr_learn_attached)
74416+ return;
74417+
74418+ spin_lock(&gr_learn_lock);
74419+
74420+ /* leave a gap at the end so we know when it's "full" but don't have to
74421+ compute the exact length of the string we're trying to append
74422+ */
74423+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
74424+ spin_unlock(&gr_learn_lock);
74425+ wake_up_interruptible(&learn_wait);
74426+ return;
74427+ }
74428+ if (learn_buffer == NULL) {
74429+ spin_unlock(&gr_learn_lock);
74430+ return;
74431+ }
74432+
74433+ va_start(args, fmt);
74434+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
74435+ va_end(args);
74436+
74437+ learn_buffer_len += len + 1;
74438+
74439+ spin_unlock(&gr_learn_lock);
74440+ wake_up_interruptible(&learn_wait);
74441+
74442+ return;
74443+}
74444+
74445+static int
74446+open_learn(struct inode *inode, struct file *file)
74447+{
74448+ if (file->f_mode & FMODE_READ && gr_learn_attached)
74449+ return -EBUSY;
74450+ if (file->f_mode & FMODE_READ) {
74451+ int retval = 0;
74452+ mutex_lock(&gr_learn_user_mutex);
74453+ if (learn_buffer == NULL)
74454+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
74455+ if (learn_buffer_user == NULL)
74456+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
74457+ if (learn_buffer == NULL) {
74458+ retval = -ENOMEM;
74459+ goto out_error;
74460+ }
74461+ if (learn_buffer_user == NULL) {
74462+ retval = -ENOMEM;
74463+ goto out_error;
74464+ }
74465+ learn_buffer_len = 0;
74466+ learn_buffer_user_len = 0;
74467+ gr_learn_attached = 1;
74468+out_error:
74469+ mutex_unlock(&gr_learn_user_mutex);
74470+ return retval;
74471+ }
74472+ return 0;
74473+}
74474+
74475+static int
74476+close_learn(struct inode *inode, struct file *file)
74477+{
74478+ if (file->f_mode & FMODE_READ) {
74479+ char *tmp = NULL;
74480+ mutex_lock(&gr_learn_user_mutex);
74481+ spin_lock(&gr_learn_lock);
74482+ tmp = learn_buffer;
74483+ learn_buffer = NULL;
74484+ spin_unlock(&gr_learn_lock);
74485+ if (tmp)
74486+ vfree(tmp);
74487+ if (learn_buffer_user != NULL) {
74488+ vfree(learn_buffer_user);
74489+ learn_buffer_user = NULL;
74490+ }
74491+ learn_buffer_len = 0;
74492+ learn_buffer_user_len = 0;
74493+ gr_learn_attached = 0;
74494+ mutex_unlock(&gr_learn_user_mutex);
74495+ }
74496+
74497+ return 0;
74498+}
74499+
74500+const struct file_operations grsec_fops = {
74501+ .read = read_learn,
74502+ .write = write_grsec_handler,
74503+ .open = open_learn,
74504+ .release = close_learn,
74505+ .poll = poll_learn,
74506+};
74507diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
74508new file mode 100644
74509index 0000000..3f8ade0
74510--- /dev/null
74511+++ b/grsecurity/gracl_policy.c
74512@@ -0,0 +1,1782 @@
74513+#include <linux/kernel.h>
74514+#include <linux/module.h>
74515+#include <linux/sched.h>
74516+#include <linux/mm.h>
74517+#include <linux/file.h>
74518+#include <linux/fs.h>
74519+#include <linux/namei.h>
74520+#include <linux/mount.h>
74521+#include <linux/tty.h>
74522+#include <linux/proc_fs.h>
74523+#include <linux/lglock.h>
74524+#include <linux/slab.h>
74525+#include <linux/vmalloc.h>
74526+#include <linux/types.h>
74527+#include <linux/sysctl.h>
74528+#include <linux/netdevice.h>
74529+#include <linux/ptrace.h>
74530+#include <linux/gracl.h>
74531+#include <linux/gralloc.h>
74532+#include <linux/security.h>
74533+#include <linux/grinternal.h>
74534+#include <linux/pid_namespace.h>
74535+#include <linux/stop_machine.h>
74536+#include <linux/fdtable.h>
74537+#include <linux/percpu.h>
74538+#include <linux/lglock.h>
74539+#include <linux/hugetlb.h>
74540+#include <linux/posix-timers.h>
74541+#include "../fs/mount.h"
74542+
74543+#include <asm/uaccess.h>
74544+#include <asm/errno.h>
74545+#include <asm/mman.h>
74546+
74547+extern struct gr_policy_state *polstate;
74548+
74549+#define FOR_EACH_ROLE_START(role) \
74550+ role = polstate->role_list; \
74551+ while (role) {
74552+
74553+#define FOR_EACH_ROLE_END(role) \
74554+ role = role->prev; \
74555+ }
74556+
74557+struct path gr_real_root;
74558+
74559+extern struct gr_alloc_state *current_alloc_state;
74560+
74561+u16 acl_sp_role_value;
74562+
74563+static DEFINE_MUTEX(gr_dev_mutex);
74564+
74565+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74566+extern void gr_clear_learn_entries(void);
74567+
74568+struct gr_arg *gr_usermode __read_only;
74569+unsigned char *gr_system_salt __read_only;
74570+unsigned char *gr_system_sum __read_only;
74571+
74572+static unsigned int gr_auth_attempts = 0;
74573+static unsigned long gr_auth_expires = 0UL;
74574+
74575+struct acl_object_label *fakefs_obj_rw;
74576+struct acl_object_label *fakefs_obj_rwx;
74577+
74578+extern int gr_init_uidset(void);
74579+extern void gr_free_uidset(void);
74580+extern void gr_remove_uid(uid_t uid);
74581+extern int gr_find_uid(uid_t uid);
74582+
74583+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
74584+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
74585+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
74586+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
74587+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
74588+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
74589+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
74590+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
74591+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
74592+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74593+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
74594+extern void assign_special_role(const char *rolename);
74595+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
74596+extern int gr_rbac_disable(void *unused);
74597+extern void gr_enable_rbac_system(void);
74598+
74599+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
74600+{
74601+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
74602+ return -EFAULT;
74603+
74604+ return 0;
74605+}
74606+
74607+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
74608+{
74609+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
74610+ return -EFAULT;
74611+
74612+ return 0;
74613+}
74614+
74615+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
74616+{
74617+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
74618+ return -EFAULT;
74619+
74620+ return 0;
74621+}
74622+
74623+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
74624+{
74625+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
74626+ return -EFAULT;
74627+
74628+ return 0;
74629+}
74630+
74631+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
74632+{
74633+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
74634+ return -EFAULT;
74635+
74636+ return 0;
74637+}
74638+
74639+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
74640+{
74641+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
74642+ return -EFAULT;
74643+
74644+ return 0;
74645+}
74646+
74647+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
74648+{
74649+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
74650+ return -EFAULT;
74651+
74652+ return 0;
74653+}
74654+
74655+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
74656+{
74657+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
74658+ return -EFAULT;
74659+
74660+ return 0;
74661+}
74662+
74663+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
74664+{
74665+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
74666+ return -EFAULT;
74667+
74668+ return 0;
74669+}
74670+
74671+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
74672+{
74673+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
74674+ return -EFAULT;
74675+
74676+ if (((uwrap->version != GRSECURITY_VERSION) &&
74677+ (uwrap->version != 0x2901)) ||
74678+ (uwrap->size != sizeof(struct gr_arg)))
74679+ return -EINVAL;
74680+
74681+ return 0;
74682+}
74683+
74684+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
74685+{
74686+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
74687+ return -EFAULT;
74688+
74689+ return 0;
74690+}
74691+
74692+static size_t get_gr_arg_wrapper_size_normal(void)
74693+{
74694+ return sizeof(struct gr_arg_wrapper);
74695+}
74696+
74697+#ifdef CONFIG_COMPAT
74698+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74699+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74700+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74701+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74702+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74703+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74704+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74705+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74706+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74707+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74708+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74709+extern size_t get_gr_arg_wrapper_size_compat(void);
74710+
74711+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74712+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74713+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74714+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74715+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74716+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74717+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74718+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74719+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74720+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74721+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74722+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74723+
74724+#else
74725+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74726+#define copy_gr_arg copy_gr_arg_normal
74727+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74728+#define copy_acl_object_label copy_acl_object_label_normal
74729+#define copy_acl_subject_label copy_acl_subject_label_normal
74730+#define copy_acl_role_label copy_acl_role_label_normal
74731+#define copy_acl_ip_label copy_acl_ip_label_normal
74732+#define copy_pointer_from_array copy_pointer_from_array_normal
74733+#define copy_sprole_pw copy_sprole_pw_normal
74734+#define copy_role_transition copy_role_transition_normal
74735+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74736+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74737+#endif
74738+
74739+static struct acl_subject_label *
74740+lookup_subject_map(const struct acl_subject_label *userp)
74741+{
74742+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74743+ struct subject_map *match;
74744+
74745+ match = polstate->subj_map_set.s_hash[index];
74746+
74747+ while (match && match->user != userp)
74748+ match = match->next;
74749+
74750+ if (match != NULL)
74751+ return match->kernel;
74752+ else
74753+ return NULL;
74754+}
74755+
74756+static void
74757+insert_subj_map_entry(struct subject_map *subjmap)
74758+{
74759+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74760+ struct subject_map **curr;
74761+
74762+ subjmap->prev = NULL;
74763+
74764+ curr = &polstate->subj_map_set.s_hash[index];
74765+ if (*curr != NULL)
74766+ (*curr)->prev = subjmap;
74767+
74768+ subjmap->next = *curr;
74769+ *curr = subjmap;
74770+
74771+ return;
74772+}
74773+
74774+static void
74775+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74776+{
74777+ unsigned int index =
74778+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74779+ struct acl_role_label **curr;
74780+ struct acl_role_label *tmp, *tmp2;
74781+
74782+ curr = &polstate->acl_role_set.r_hash[index];
74783+
74784+ /* simple case, slot is empty, just set it to our role */
74785+ if (*curr == NULL) {
74786+ *curr = role;
74787+ } else {
74788+ /* example:
74789+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74790+ 2 -> 3
74791+ */
74792+ /* first check to see if we can already be reached via this slot */
74793+ tmp = *curr;
74794+ while (tmp && tmp != role)
74795+ tmp = tmp->next;
74796+ if (tmp == role) {
74797+ /* we don't need to add ourselves to this slot's chain */
74798+ return;
74799+ }
74800+ /* we need to add ourselves to this chain, two cases */
74801+ if (role->next == NULL) {
74802+ /* simple case, append the current chain to our role */
74803+ role->next = *curr;
74804+ *curr = role;
74805+ } else {
74806+ /* 1 -> 2 -> 3 -> 4
74807+ 2 -> 3 -> 4
74808+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74809+ */
74810+ /* trickier case: walk our role's chain until we find
74811+ the role for the start of the current slot's chain */
74812+ tmp = role;
74813+ tmp2 = *curr;
74814+ while (tmp->next && tmp->next != tmp2)
74815+ tmp = tmp->next;
74816+ if (tmp->next == tmp2) {
74817+ /* from example above, we found 3, so just
74818+ replace this slot's chain with ours */
74819+ *curr = role;
74820+ } else {
74821+ /* we didn't find a subset of our role's chain
74822+ in the current slot's chain, so append their
74823+ chain to ours, and set us as the first role in
74824+ the slot's chain
74825+
74826+ we could fold this case with the case above,
74827+ but making it explicit for clarity
74828+ */
74829+ tmp->next = tmp2;
74830+ *curr = role;
74831+ }
74832+ }
74833+ }
74834+
74835+ return;
74836+}
74837+
74838+static void
74839+insert_acl_role_label(struct acl_role_label *role)
74840+{
74841+ int i;
74842+
74843+ if (polstate->role_list == NULL) {
74844+ polstate->role_list = role;
74845+ role->prev = NULL;
74846+ } else {
74847+ role->prev = polstate->role_list;
74848+ polstate->role_list = role;
74849+ }
74850+
74851+ /* used for hash chains */
74852+ role->next = NULL;
74853+
74854+ if (role->roletype & GR_ROLE_DOMAIN) {
74855+ for (i = 0; i < role->domain_child_num; i++)
74856+ __insert_acl_role_label(role, role->domain_children[i]);
74857+ } else
74858+ __insert_acl_role_label(role, role->uidgid);
74859+}
74860+
74861+static int
74862+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
74863+{
74864+ struct name_entry **curr, *nentry;
74865+ struct inodev_entry *ientry;
74866+ unsigned int len = strlen(name);
74867+ unsigned int key = full_name_hash(name, len);
74868+ unsigned int index = key % polstate->name_set.n_size;
74869+
74870+ curr = &polstate->name_set.n_hash[index];
74871+
74872+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74873+ curr = &((*curr)->next);
74874+
74875+ if (*curr != NULL)
74876+ return 1;
74877+
74878+ nentry = acl_alloc(sizeof (struct name_entry));
74879+ if (nentry == NULL)
74880+ return 0;
74881+ ientry = acl_alloc(sizeof (struct inodev_entry));
74882+ if (ientry == NULL)
74883+ return 0;
74884+ ientry->nentry = nentry;
74885+
74886+ nentry->key = key;
74887+ nentry->name = name;
74888+ nentry->inode = inode;
74889+ nentry->device = device;
74890+ nentry->len = len;
74891+ nentry->deleted = deleted;
74892+
74893+ nentry->prev = NULL;
74894+ curr = &polstate->name_set.n_hash[index];
74895+ if (*curr != NULL)
74896+ (*curr)->prev = nentry;
74897+ nentry->next = *curr;
74898+ *curr = nentry;
74899+
74900+ /* insert us into the table searchable by inode/dev */
74901+ __insert_inodev_entry(polstate, ientry);
74902+
74903+ return 1;
74904+}
74905+
74906+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74907+
74908+static void *
74909+create_table(__u32 * len, int elementsize)
74910+{
74911+ unsigned int table_sizes[] = {
74912+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74913+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74914+ 4194301, 8388593, 16777213, 33554393, 67108859
74915+ };
74916+ void *newtable = NULL;
74917+ unsigned int pwr = 0;
74918+
74919+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74920+ table_sizes[pwr] <= *len)
74921+ pwr++;
74922+
74923+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74924+ return newtable;
74925+
74926+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74927+ newtable =
74928+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74929+ else
74930+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74931+
74932+ *len = table_sizes[pwr];
74933+
74934+ return newtable;
74935+}
74936+
74937+static int
74938+init_variables(const struct gr_arg *arg, bool reload)
74939+{
74940+ struct task_struct *reaper = init_pid_ns.child_reaper;
74941+ unsigned int stacksize;
74942+
74943+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74944+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74945+ polstate->name_set.n_size = arg->role_db.num_objects;
74946+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74947+
74948+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74949+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74950+ return 1;
74951+
74952+ if (!reload) {
74953+ if (!gr_init_uidset())
74954+ return 1;
74955+ }
74956+
74957+ /* set up the stack that holds allocation info */
74958+
74959+ stacksize = arg->role_db.num_pointers + 5;
74960+
74961+ if (!acl_alloc_stack_init(stacksize))
74962+ return 1;
74963+
74964+ if (!reload) {
74965+ /* grab reference for the real root dentry and vfsmount */
74966+ get_fs_root(reaper->fs, &gr_real_root);
74967+
74968+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74969+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74970+#endif
74971+
74972+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74973+ if (fakefs_obj_rw == NULL)
74974+ return 1;
74975+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74976+
74977+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74978+ if (fakefs_obj_rwx == NULL)
74979+ return 1;
74980+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74981+ }
74982+
74983+ polstate->subj_map_set.s_hash =
74984+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74985+ polstate->acl_role_set.r_hash =
74986+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74987+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74988+ polstate->inodev_set.i_hash =
74989+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74990+
74991+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74992+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74993+ return 1;
74994+
74995+ memset(polstate->subj_map_set.s_hash, 0,
74996+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74997+ memset(polstate->acl_role_set.r_hash, 0,
74998+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74999+ memset(polstate->name_set.n_hash, 0,
75000+ sizeof (struct name_entry *) * polstate->name_set.n_size);
75001+ memset(polstate->inodev_set.i_hash, 0,
75002+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
75003+
75004+ return 0;
75005+}
75006+
75007+/* free information not needed after startup
75008+ currently contains user->kernel pointer mappings for subjects
75009+*/
75010+
75011+static void
75012+free_init_variables(void)
75013+{
75014+ __u32 i;
75015+
75016+ if (polstate->subj_map_set.s_hash) {
75017+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
75018+ if (polstate->subj_map_set.s_hash[i]) {
75019+ kfree(polstate->subj_map_set.s_hash[i]);
75020+ polstate->subj_map_set.s_hash[i] = NULL;
75021+ }
75022+ }
75023+
75024+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
75025+ PAGE_SIZE)
75026+ kfree(polstate->subj_map_set.s_hash);
75027+ else
75028+ vfree(polstate->subj_map_set.s_hash);
75029+ }
75030+
75031+ return;
75032+}
75033+
75034+static void
75035+free_variables(bool reload)
75036+{
75037+ struct acl_subject_label *s;
75038+ struct acl_role_label *r;
75039+ struct task_struct *task, *task2;
75040+ unsigned int x;
75041+
75042+ if (!reload) {
75043+ gr_clear_learn_entries();
75044+
75045+ read_lock(&tasklist_lock);
75046+ do_each_thread(task2, task) {
75047+ task->acl_sp_role = 0;
75048+ task->acl_role_id = 0;
75049+ task->inherited = 0;
75050+ task->acl = NULL;
75051+ task->role = NULL;
75052+ } while_each_thread(task2, task);
75053+ read_unlock(&tasklist_lock);
75054+
75055+ kfree(fakefs_obj_rw);
75056+ fakefs_obj_rw = NULL;
75057+ kfree(fakefs_obj_rwx);
75058+ fakefs_obj_rwx = NULL;
75059+
75060+ /* release the reference to the real root dentry and vfsmount */
75061+ path_put(&gr_real_root);
75062+ memset(&gr_real_root, 0, sizeof(gr_real_root));
75063+ }
75064+
75065+ /* free all object hash tables */
75066+
75067+ FOR_EACH_ROLE_START(r)
75068+ if (r->subj_hash == NULL)
75069+ goto next_role;
75070+ FOR_EACH_SUBJECT_START(r, s, x)
75071+ if (s->obj_hash == NULL)
75072+ break;
75073+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75074+ kfree(s->obj_hash);
75075+ else
75076+ vfree(s->obj_hash);
75077+ FOR_EACH_SUBJECT_END(s, x)
75078+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75079+ if (s->obj_hash == NULL)
75080+ break;
75081+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75082+ kfree(s->obj_hash);
75083+ else
75084+ vfree(s->obj_hash);
75085+ FOR_EACH_NESTED_SUBJECT_END(s)
75086+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75087+ kfree(r->subj_hash);
75088+ else
75089+ vfree(r->subj_hash);
75090+ r->subj_hash = NULL;
75091+next_role:
75092+ FOR_EACH_ROLE_END(r)
75093+
75094+ acl_free_all();
75095+
75096+ if (polstate->acl_role_set.r_hash) {
75097+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75098+ PAGE_SIZE)
75099+ kfree(polstate->acl_role_set.r_hash);
75100+ else
75101+ vfree(polstate->acl_role_set.r_hash);
75102+ }
75103+ if (polstate->name_set.n_hash) {
75104+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
75105+ PAGE_SIZE)
75106+ kfree(polstate->name_set.n_hash);
75107+ else
75108+ vfree(polstate->name_set.n_hash);
75109+ }
75110+
75111+ if (polstate->inodev_set.i_hash) {
75112+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75113+ PAGE_SIZE)
75114+ kfree(polstate->inodev_set.i_hash);
75115+ else
75116+ vfree(polstate->inodev_set.i_hash);
75117+ }
75118+
75119+ if (!reload)
75120+ gr_free_uidset();
75121+
75122+ memset(&polstate->name_set, 0, sizeof (struct name_db));
75123+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
75124+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
75125+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
75126+
75127+ polstate->default_role = NULL;
75128+ polstate->kernel_role = NULL;
75129+ polstate->role_list = NULL;
75130+
75131+ return;
75132+}
75133+
75134+static struct acl_subject_label *
75135+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
75136+
75137+static int alloc_and_copy_string(char **name, unsigned int maxlen)
75138+{
75139+ unsigned int len = strnlen_user(*name, maxlen);
75140+ char *tmp;
75141+
75142+ if (!len || len >= maxlen)
75143+ return -EINVAL;
75144+
75145+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75146+ return -ENOMEM;
75147+
75148+ if (copy_from_user(tmp, *name, len))
75149+ return -EFAULT;
75150+
75151+ tmp[len-1] = '\0';
75152+ *name = tmp;
75153+
75154+ return 0;
75155+}
75156+
75157+static int
75158+copy_user_glob(struct acl_object_label *obj)
75159+{
75160+ struct acl_object_label *g_tmp, **guser;
75161+ int error;
75162+
75163+ if (obj->globbed == NULL)
75164+ return 0;
75165+
75166+ guser = &obj->globbed;
75167+ while (*guser) {
75168+ g_tmp = (struct acl_object_label *)
75169+ acl_alloc(sizeof (struct acl_object_label));
75170+ if (g_tmp == NULL)
75171+ return -ENOMEM;
75172+
75173+ if (copy_acl_object_label(g_tmp, *guser))
75174+ return -EFAULT;
75175+
75176+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
75177+ if (error)
75178+ return error;
75179+
75180+ *guser = g_tmp;
75181+ guser = &(g_tmp->next);
75182+ }
75183+
75184+ return 0;
75185+}
75186+
75187+static int
75188+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75189+ struct acl_role_label *role)
75190+{
75191+ struct acl_object_label *o_tmp;
75192+ int ret;
75193+
75194+ while (userp) {
75195+ if ((o_tmp = (struct acl_object_label *)
75196+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75197+ return -ENOMEM;
75198+
75199+ if (copy_acl_object_label(o_tmp, userp))
75200+ return -EFAULT;
75201+
75202+ userp = o_tmp->prev;
75203+
75204+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
75205+ if (ret)
75206+ return ret;
75207+
75208+ insert_acl_obj_label(o_tmp, subj);
75209+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75210+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75211+ return -ENOMEM;
75212+
75213+ ret = copy_user_glob(o_tmp);
75214+ if (ret)
75215+ return ret;
75216+
75217+ if (o_tmp->nested) {
75218+ int already_copied;
75219+
75220+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
75221+ if (IS_ERR(o_tmp->nested))
75222+ return PTR_ERR(o_tmp->nested);
75223+
75224+ /* insert into nested subject list if we haven't copied this one yet
75225+ to prevent duplicate entries */
75226+ if (!already_copied) {
75227+ o_tmp->nested->next = role->hash->first;
75228+ role->hash->first = o_tmp->nested;
75229+ }
75230+ }
75231+ }
75232+
75233+ return 0;
75234+}
75235+
75236+static __u32
75237+count_user_subjs(struct acl_subject_label *userp)
75238+{
75239+ struct acl_subject_label s_tmp;
75240+ __u32 num = 0;
75241+
75242+ while (userp) {
75243+ if (copy_acl_subject_label(&s_tmp, userp))
75244+ break;
75245+
75246+ userp = s_tmp.prev;
75247+ }
75248+
75249+ return num;
75250+}
75251+
75252+static int
75253+copy_user_allowedips(struct acl_role_label *rolep)
75254+{
75255+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75256+
75257+ ruserip = rolep->allowed_ips;
75258+
75259+ while (ruserip) {
75260+ rlast = rtmp;
75261+
75262+ if ((rtmp = (struct role_allowed_ip *)
75263+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75264+ return -ENOMEM;
75265+
75266+ if (copy_role_allowed_ip(rtmp, ruserip))
75267+ return -EFAULT;
75268+
75269+ ruserip = rtmp->prev;
75270+
75271+ if (!rlast) {
75272+ rtmp->prev = NULL;
75273+ rolep->allowed_ips = rtmp;
75274+ } else {
75275+ rlast->next = rtmp;
75276+ rtmp->prev = rlast;
75277+ }
75278+
75279+ if (!ruserip)
75280+ rtmp->next = NULL;
75281+ }
75282+
75283+ return 0;
75284+}
75285+
75286+static int
75287+copy_user_transitions(struct acl_role_label *rolep)
75288+{
75289+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75290+ int error;
75291+
75292+ rusertp = rolep->transitions;
75293+
75294+ while (rusertp) {
75295+ rlast = rtmp;
75296+
75297+ if ((rtmp = (struct role_transition *)
75298+ acl_alloc(sizeof (struct role_transition))) == NULL)
75299+ return -ENOMEM;
75300+
75301+ if (copy_role_transition(rtmp, rusertp))
75302+ return -EFAULT;
75303+
75304+ rusertp = rtmp->prev;
75305+
75306+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
75307+ if (error)
75308+ return error;
75309+
75310+ if (!rlast) {
75311+ rtmp->prev = NULL;
75312+ rolep->transitions = rtmp;
75313+ } else {
75314+ rlast->next = rtmp;
75315+ rtmp->prev = rlast;
75316+ }
75317+
75318+ if (!rusertp)
75319+ rtmp->next = NULL;
75320+ }
75321+
75322+ return 0;
75323+}
75324+
75325+static __u32 count_user_objs(const struct acl_object_label __user *userp)
75326+{
75327+ struct acl_object_label o_tmp;
75328+ __u32 num = 0;
75329+
75330+ while (userp) {
75331+ if (copy_acl_object_label(&o_tmp, userp))
75332+ break;
75333+
75334+ userp = o_tmp.prev;
75335+ num++;
75336+ }
75337+
75338+ return num;
75339+}
75340+
75341+static struct acl_subject_label *
75342+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
75343+{
75344+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75345+ __u32 num_objs;
75346+ struct acl_ip_label **i_tmp, *i_utmp2;
75347+ struct gr_hash_struct ghash;
75348+ struct subject_map *subjmap;
75349+ unsigned int i_num;
75350+ int err;
75351+
75352+ if (already_copied != NULL)
75353+ *already_copied = 0;
75354+
75355+ s_tmp = lookup_subject_map(userp);
75356+
75357+ /* we've already copied this subject into the kernel, just return
75358+ the reference to it, and don't copy it over again
75359+ */
75360+ if (s_tmp) {
75361+ if (already_copied != NULL)
75362+ *already_copied = 1;
75363+ return(s_tmp);
75364+ }
75365+
75366+ if ((s_tmp = (struct acl_subject_label *)
75367+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75368+ return ERR_PTR(-ENOMEM);
75369+
75370+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75371+ if (subjmap == NULL)
75372+ return ERR_PTR(-ENOMEM);
75373+
75374+ subjmap->user = userp;
75375+ subjmap->kernel = s_tmp;
75376+ insert_subj_map_entry(subjmap);
75377+
75378+ if (copy_acl_subject_label(s_tmp, userp))
75379+ return ERR_PTR(-EFAULT);
75380+
75381+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
75382+ if (err)
75383+ return ERR_PTR(err);
75384+
75385+ if (!strcmp(s_tmp->filename, "/"))
75386+ role->root_label = s_tmp;
75387+
75388+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
75389+ return ERR_PTR(-EFAULT);
75390+
75391+ /* copy user and group transition tables */
75392+
75393+ if (s_tmp->user_trans_num) {
75394+ uid_t *uidlist;
75395+
75396+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75397+ if (uidlist == NULL)
75398+ return ERR_PTR(-ENOMEM);
75399+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75400+ return ERR_PTR(-EFAULT);
75401+
75402+ s_tmp->user_transitions = uidlist;
75403+ }
75404+
75405+ if (s_tmp->group_trans_num) {
75406+ gid_t *gidlist;
75407+
75408+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75409+ if (gidlist == NULL)
75410+ return ERR_PTR(-ENOMEM);
75411+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75412+ return ERR_PTR(-EFAULT);
75413+
75414+ s_tmp->group_transitions = gidlist;
75415+ }
75416+
75417+ /* set up object hash table */
75418+ num_objs = count_user_objs(ghash.first);
75419+
75420+ s_tmp->obj_hash_size = num_objs;
75421+ s_tmp->obj_hash =
75422+ (struct acl_object_label **)
75423+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75424+
75425+ if (!s_tmp->obj_hash)
75426+ return ERR_PTR(-ENOMEM);
75427+
75428+ memset(s_tmp->obj_hash, 0,
75429+ s_tmp->obj_hash_size *
75430+ sizeof (struct acl_object_label *));
75431+
75432+ /* add in objects */
75433+ err = copy_user_objs(ghash.first, s_tmp, role);
75434+
75435+ if (err)
75436+ return ERR_PTR(err);
75437+
75438+ /* set pointer for parent subject */
75439+ if (s_tmp->parent_subject) {
75440+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
75441+
75442+ if (IS_ERR(s_tmp2))
75443+ return s_tmp2;
75444+
75445+ s_tmp->parent_subject = s_tmp2;
75446+ }
75447+
75448+ /* add in ip acls */
75449+
75450+ if (!s_tmp->ip_num) {
75451+ s_tmp->ips = NULL;
75452+ goto insert;
75453+ }
75454+
75455+ i_tmp =
75456+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75457+ sizeof (struct acl_ip_label *));
75458+
75459+ if (!i_tmp)
75460+ return ERR_PTR(-ENOMEM);
75461+
75462+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75463+ *(i_tmp + i_num) =
75464+ (struct acl_ip_label *)
75465+ acl_alloc(sizeof (struct acl_ip_label));
75466+ if (!*(i_tmp + i_num))
75467+ return ERR_PTR(-ENOMEM);
75468+
75469+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
75470+ return ERR_PTR(-EFAULT);
75471+
75472+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
75473+ return ERR_PTR(-EFAULT);
75474+
75475+ if ((*(i_tmp + i_num))->iface == NULL)
75476+ continue;
75477+
75478+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
75479+ if (err)
75480+ return ERR_PTR(err);
75481+ }
75482+
75483+ s_tmp->ips = i_tmp;
75484+
75485+insert:
75486+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75487+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75488+ return ERR_PTR(-ENOMEM);
75489+
75490+ return s_tmp;
75491+}
75492+
75493+static int
75494+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75495+{
75496+ struct acl_subject_label s_pre;
75497+ struct acl_subject_label * ret;
75498+ int err;
75499+
75500+ while (userp) {
75501+ if (copy_acl_subject_label(&s_pre, userp))
75502+ return -EFAULT;
75503+
75504+ ret = do_copy_user_subj(userp, role, NULL);
75505+
75506+ err = PTR_ERR(ret);
75507+ if (IS_ERR(ret))
75508+ return err;
75509+
75510+ insert_acl_subj_label(ret, role);
75511+
75512+ userp = s_pre.prev;
75513+ }
75514+
75515+ return 0;
75516+}
75517+
75518+static int
75519+copy_user_acl(struct gr_arg *arg)
75520+{
75521+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75522+ struct acl_subject_label *subj_list;
75523+ struct sprole_pw *sptmp;
75524+ struct gr_hash_struct *ghash;
75525+ uid_t *domainlist;
75526+ unsigned int r_num;
75527+ int err = 0;
75528+ __u16 i;
75529+ __u32 num_subjs;
75530+
75531+ /* we need a default and kernel role */
75532+ if (arg->role_db.num_roles < 2)
75533+ return -EINVAL;
75534+
75535+ /* copy special role authentication info from userspace */
75536+
75537+ polstate->num_sprole_pws = arg->num_sprole_pws;
75538+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
75539+
75540+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
75541+ return -ENOMEM;
75542+
75543+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75544+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75545+ if (!sptmp)
75546+ return -ENOMEM;
75547+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
75548+ return -EFAULT;
75549+
75550+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
75551+ if (err)
75552+ return err;
75553+
75554+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75555+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
75556+#endif
75557+
75558+ polstate->acl_special_roles[i] = sptmp;
75559+ }
75560+
75561+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75562+
75563+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75564+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75565+
75566+ if (!r_tmp)
75567+ return -ENOMEM;
75568+
75569+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
75570+ return -EFAULT;
75571+
75572+ if (copy_acl_role_label(r_tmp, r_utmp2))
75573+ return -EFAULT;
75574+
75575+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
75576+ if (err)
75577+ return err;
75578+
75579+ if (!strcmp(r_tmp->rolename, "default")
75580+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75581+ polstate->default_role = r_tmp;
75582+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75583+ polstate->kernel_role = r_tmp;
75584+ }
75585+
75586+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
75587+ return -ENOMEM;
75588+
75589+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
75590+ return -EFAULT;
75591+
75592+ r_tmp->hash = ghash;
75593+
75594+ num_subjs = count_user_subjs(r_tmp->hash->first);
75595+
75596+ r_tmp->subj_hash_size = num_subjs;
75597+ r_tmp->subj_hash =
75598+ (struct acl_subject_label **)
75599+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75600+
75601+ if (!r_tmp->subj_hash)
75602+ return -ENOMEM;
75603+
75604+ err = copy_user_allowedips(r_tmp);
75605+ if (err)
75606+ return err;
75607+
75608+ /* copy domain info */
75609+ if (r_tmp->domain_children != NULL) {
75610+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75611+ if (domainlist == NULL)
75612+ return -ENOMEM;
75613+
75614+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
75615+ return -EFAULT;
75616+
75617+ r_tmp->domain_children = domainlist;
75618+ }
75619+
75620+ err = copy_user_transitions(r_tmp);
75621+ if (err)
75622+ return err;
75623+
75624+ memset(r_tmp->subj_hash, 0,
75625+ r_tmp->subj_hash_size *
75626+ sizeof (struct acl_subject_label *));
75627+
75628+ /* acquire the list of subjects, then NULL out
75629+ the list prior to parsing the subjects for this role,
75630+ as during this parsing the list is replaced with a list
75631+ of *nested* subjects for the role
75632+ */
75633+ subj_list = r_tmp->hash->first;
75634+
75635+ /* set nested subject list to null */
75636+ r_tmp->hash->first = NULL;
75637+
75638+ err = copy_user_subjs(subj_list, r_tmp);
75639+
75640+ if (err)
75641+ return err;
75642+
75643+ insert_acl_role_label(r_tmp);
75644+ }
75645+
75646+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
75647+ return -EINVAL;
75648+
75649+ return err;
75650+}
75651+
75652+static int gracl_reload_apply_policies(void *reload)
75653+{
75654+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
75655+ struct task_struct *task, *task2;
75656+ struct acl_role_label *role, *rtmp;
75657+ struct acl_subject_label *subj;
75658+ const struct cred *cred;
75659+ int role_applied;
75660+ int ret = 0;
75661+
75662+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
75663+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
75664+
75665+ /* first make sure we'll be able to apply the new policy cleanly */
75666+ do_each_thread(task2, task) {
75667+ if (task->exec_file == NULL)
75668+ continue;
75669+ role_applied = 0;
75670+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75671+ /* preserve special roles */
75672+ FOR_EACH_ROLE_START(role)
75673+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75674+ rtmp = task->role;
75675+ task->role = role;
75676+ role_applied = 1;
75677+ break;
75678+ }
75679+ FOR_EACH_ROLE_END(role)
75680+ }
75681+ if (!role_applied) {
75682+ cred = __task_cred(task);
75683+ rtmp = task->role;
75684+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75685+ }
75686+ /* this handles non-nested inherited subjects, nested subjects will still
75687+ be dropped currently */
75688+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75689+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
75690+ /* change the role back so that we've made no modifications to the policy */
75691+ task->role = rtmp;
75692+
75693+ if (subj == NULL || task->tmpacl == NULL) {
75694+ ret = -EINVAL;
75695+ goto out;
75696+ }
75697+ } while_each_thread(task2, task);
75698+
75699+ /* now actually apply the policy */
75700+
75701+ do_each_thread(task2, task) {
75702+ if (task->exec_file) {
75703+ role_applied = 0;
75704+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75705+ /* preserve special roles */
75706+ FOR_EACH_ROLE_START(role)
75707+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75708+ task->role = role;
75709+ role_applied = 1;
75710+ break;
75711+ }
75712+ FOR_EACH_ROLE_END(role)
75713+ }
75714+ if (!role_applied) {
75715+ cred = __task_cred(task);
75716+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75717+ }
75718+ /* this handles non-nested inherited subjects, nested subjects will still
75719+ be dropped currently */
75720+ if (!reload_state->oldmode && task->inherited)
75721+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75722+ else {
75723+ /* looked up and tagged to the task previously */
75724+ subj = task->tmpacl;
75725+ }
75726+ /* subj will be non-null */
75727+ __gr_apply_subject_to_task(polstate, task, subj);
75728+ if (reload_state->oldmode) {
75729+ task->acl_role_id = 0;
75730+ task->acl_sp_role = 0;
75731+ task->inherited = 0;
75732+ }
75733+ } else {
75734+ // it's a kernel process
75735+ task->role = polstate->kernel_role;
75736+ task->acl = polstate->kernel_role->root_label;
75737+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75738+ task->acl->mode &= ~GR_PROCFIND;
75739+#endif
75740+ }
75741+ } while_each_thread(task2, task);
75742+
75743+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75744+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75745+
75746+out:
75747+
75748+ return ret;
75749+}
75750+
75751+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75752+{
75753+ struct gr_reload_state new_reload_state = { };
75754+ int err;
75755+
75756+ new_reload_state.oldpolicy_ptr = polstate;
75757+ new_reload_state.oldalloc_ptr = current_alloc_state;
75758+ new_reload_state.oldmode = oldmode;
75759+
75760+ current_alloc_state = &new_reload_state.newalloc;
75761+ polstate = &new_reload_state.newpolicy;
75762+
75763+ /* everything relevant is now saved off, copy in the new policy */
75764+ if (init_variables(args, true)) {
75765+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75766+ err = -ENOMEM;
75767+ goto error;
75768+ }
75769+
75770+ err = copy_user_acl(args);
75771+ free_init_variables();
75772+ if (err)
75773+ goto error;
75774+ /* the new policy is copied in, with the old policy available via saved_state
75775+ first go through applying roles, making sure to preserve special roles
75776+ then apply new subjects, making sure to preserve inherited and nested subjects,
75777+ though currently only inherited subjects will be preserved
75778+ */
75779+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75780+ if (err)
75781+ goto error;
75782+
75783+ /* we've now applied the new policy, so restore the old policy state to free it */
75784+ polstate = &new_reload_state.oldpolicy;
75785+ current_alloc_state = &new_reload_state.oldalloc;
75786+ free_variables(true);
75787+
75788+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75789+ to running_polstate/current_alloc_state inside stop_machine
75790+ */
75791+ err = 0;
75792+ goto out;
75793+error:
75794+ /* on error of loading the new policy, we'll just keep the previous
75795+ policy set around
75796+ */
75797+ free_variables(true);
75798+
75799+ /* doesn't affect runtime, but maintains consistent state */
75800+out:
75801+ polstate = new_reload_state.oldpolicy_ptr;
75802+ current_alloc_state = new_reload_state.oldalloc_ptr;
75803+
75804+ return err;
75805+}
75806+
75807+static int
75808+gracl_init(struct gr_arg *args)
75809+{
75810+ int error = 0;
75811+
75812+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75813+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75814+
75815+ if (init_variables(args, false)) {
75816+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75817+ error = -ENOMEM;
75818+ goto out;
75819+ }
75820+
75821+ error = copy_user_acl(args);
75822+ free_init_variables();
75823+ if (error)
75824+ goto out;
75825+
75826+ error = gr_set_acls(0);
75827+ if (error)
75828+ goto out;
75829+
75830+ gr_enable_rbac_system();
75831+
75832+ return 0;
75833+
75834+out:
75835+ free_variables(false);
75836+ return error;
75837+}
75838+
75839+static int
75840+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75841+ unsigned char **sum)
75842+{
75843+ struct acl_role_label *r;
75844+ struct role_allowed_ip *ipp;
75845+ struct role_transition *trans;
75846+ unsigned int i;
75847+ int found = 0;
75848+ u32 curr_ip = current->signal->curr_ip;
75849+
75850+ current->signal->saved_ip = curr_ip;
75851+
75852+ /* check transition table */
75853+
75854+ for (trans = current->role->transitions; trans; trans = trans->next) {
75855+ if (!strcmp(rolename, trans->rolename)) {
75856+ found = 1;
75857+ break;
75858+ }
75859+ }
75860+
75861+ if (!found)
75862+ return 0;
75863+
75864+ /* handle special roles that do not require authentication
75865+ and check ip */
75866+
75867+ FOR_EACH_ROLE_START(r)
75868+ if (!strcmp(rolename, r->rolename) &&
75869+ (r->roletype & GR_ROLE_SPECIAL)) {
75870+ found = 0;
75871+ if (r->allowed_ips != NULL) {
75872+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75873+ if ((ntohl(curr_ip) & ipp->netmask) ==
75874+ (ntohl(ipp->addr) & ipp->netmask))
75875+ found = 1;
75876+ }
75877+ } else
75878+ found = 2;
75879+ if (!found)
75880+ return 0;
75881+
75882+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75883+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75884+ *salt = NULL;
75885+ *sum = NULL;
75886+ return 1;
75887+ }
75888+ }
75889+ FOR_EACH_ROLE_END(r)
75890+
75891+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75892+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75893+ *salt = polstate->acl_special_roles[i]->salt;
75894+ *sum = polstate->acl_special_roles[i]->sum;
75895+ return 1;
75896+ }
75897+ }
75898+
75899+ return 0;
75900+}
75901+
75902+int gr_check_secure_terminal(struct task_struct *task)
75903+{
75904+ struct task_struct *p, *p2, *p3;
75905+ struct files_struct *files;
75906+ struct fdtable *fdt;
75907+ struct file *our_file = NULL, *file;
75908+ int i;
75909+
75910+ if (task->signal->tty == NULL)
75911+ return 1;
75912+
75913+ files = get_files_struct(task);
75914+ if (files != NULL) {
75915+ rcu_read_lock();
75916+ fdt = files_fdtable(files);
75917+ for (i=0; i < fdt->max_fds; i++) {
75918+ file = fcheck_files(files, i);
75919+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75920+ get_file(file);
75921+ our_file = file;
75922+ }
75923+ }
75924+ rcu_read_unlock();
75925+ put_files_struct(files);
75926+ }
75927+
75928+ if (our_file == NULL)
75929+ return 1;
75930+
75931+ read_lock(&tasklist_lock);
75932+ do_each_thread(p2, p) {
75933+ files = get_files_struct(p);
75934+ if (files == NULL ||
75935+ (p->signal && p->signal->tty == task->signal->tty)) {
75936+ if (files != NULL)
75937+ put_files_struct(files);
75938+ continue;
75939+ }
75940+ rcu_read_lock();
75941+ fdt = files_fdtable(files);
75942+ for (i=0; i < fdt->max_fds; i++) {
75943+ file = fcheck_files(files, i);
75944+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75945+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75946+ p3 = task;
75947+ while (task_pid_nr(p3) > 0) {
75948+ if (p3 == p)
75949+ break;
75950+ p3 = p3->real_parent;
75951+ }
75952+ if (p3 == p)
75953+ break;
75954+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75955+ gr_handle_alertkill(p);
75956+ rcu_read_unlock();
75957+ put_files_struct(files);
75958+ read_unlock(&tasklist_lock);
75959+ fput(our_file);
75960+ return 0;
75961+ }
75962+ }
75963+ rcu_read_unlock();
75964+ put_files_struct(files);
75965+ } while_each_thread(p2, p);
75966+ read_unlock(&tasklist_lock);
75967+
75968+ fput(our_file);
75969+ return 1;
75970+}
75971+
75972+ssize_t
75973+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75974+{
75975+ struct gr_arg_wrapper uwrap;
75976+ unsigned char *sprole_salt = NULL;
75977+ unsigned char *sprole_sum = NULL;
75978+ int error = 0;
75979+ int error2 = 0;
75980+ size_t req_count = 0;
75981+ unsigned char oldmode = 0;
75982+
75983+ mutex_lock(&gr_dev_mutex);
75984+
75985+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75986+ error = -EPERM;
75987+ goto out;
75988+ }
75989+
75990+#ifdef CONFIG_COMPAT
75991+ pax_open_kernel();
75992+ if (is_compat_task()) {
75993+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75994+ copy_gr_arg = &copy_gr_arg_compat;
75995+ copy_acl_object_label = &copy_acl_object_label_compat;
75996+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75997+ copy_acl_role_label = &copy_acl_role_label_compat;
75998+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75999+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
76000+ copy_role_transition = &copy_role_transition_compat;
76001+ copy_sprole_pw = &copy_sprole_pw_compat;
76002+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
76003+ copy_pointer_from_array = &copy_pointer_from_array_compat;
76004+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
76005+ } else {
76006+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
76007+ copy_gr_arg = &copy_gr_arg_normal;
76008+ copy_acl_object_label = &copy_acl_object_label_normal;
76009+ copy_acl_subject_label = &copy_acl_subject_label_normal;
76010+ copy_acl_role_label = &copy_acl_role_label_normal;
76011+ copy_acl_ip_label = &copy_acl_ip_label_normal;
76012+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
76013+ copy_role_transition = &copy_role_transition_normal;
76014+ copy_sprole_pw = &copy_sprole_pw_normal;
76015+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
76016+ copy_pointer_from_array = &copy_pointer_from_array_normal;
76017+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
76018+ }
76019+ pax_close_kernel();
76020+#endif
76021+
76022+ req_count = get_gr_arg_wrapper_size();
76023+
76024+ if (count != req_count) {
76025+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
76026+ error = -EINVAL;
76027+ goto out;
76028+ }
76029+
76030+
76031+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
76032+ gr_auth_expires = 0;
76033+ gr_auth_attempts = 0;
76034+ }
76035+
76036+ error = copy_gr_arg_wrapper(buf, &uwrap);
76037+ if (error)
76038+ goto out;
76039+
76040+ error = copy_gr_arg(uwrap.arg, gr_usermode);
76041+ if (error)
76042+ goto out;
76043+
76044+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76045+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76046+ time_after(gr_auth_expires, get_seconds())) {
76047+ error = -EBUSY;
76048+ goto out;
76049+ }
76050+
76051+ /* if non-root trying to do anything other than use a special role,
76052+ do not attempt authentication, do not count towards authentication
76053+ locking
76054+ */
76055+
76056+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
76057+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
76058+ gr_is_global_nonroot(current_uid())) {
76059+ error = -EPERM;
76060+ goto out;
76061+ }
76062+
76063+ /* ensure pw and special role name are null terminated */
76064+
76065+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
76066+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
76067+
76068+ /* Okay.
76069+ * We have our enough of the argument structure..(we have yet
76070+ * to copy_from_user the tables themselves) . Copy the tables
76071+ * only if we need them, i.e. for loading operations. */
76072+
76073+ switch (gr_usermode->mode) {
76074+ case GR_STATUS:
76075+ if (gr_acl_is_enabled()) {
76076+ error = 1;
76077+ if (!gr_check_secure_terminal(current))
76078+ error = 3;
76079+ } else
76080+ error = 2;
76081+ goto out;
76082+ case GR_SHUTDOWN:
76083+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76084+ stop_machine(gr_rbac_disable, NULL, NULL);
76085+ free_variables(false);
76086+ memset(gr_usermode, 0, sizeof(struct gr_arg));
76087+ memset(gr_system_salt, 0, GR_SALT_LEN);
76088+ memset(gr_system_sum, 0, GR_SHA_LEN);
76089+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
76090+ } else if (gr_acl_is_enabled()) {
76091+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
76092+ error = -EPERM;
76093+ } else {
76094+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
76095+ error = -EAGAIN;
76096+ }
76097+ break;
76098+ case GR_ENABLE:
76099+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
76100+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
76101+ else {
76102+ if (gr_acl_is_enabled())
76103+ error = -EAGAIN;
76104+ else
76105+ error = error2;
76106+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
76107+ }
76108+ break;
76109+ case GR_OLDRELOAD:
76110+ oldmode = 1;
76111+ case GR_RELOAD:
76112+ if (!gr_acl_is_enabled()) {
76113+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
76114+ error = -EAGAIN;
76115+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76116+ error2 = gracl_reload(gr_usermode, oldmode);
76117+ if (!error2)
76118+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
76119+ else {
76120+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76121+ error = error2;
76122+ }
76123+ } else {
76124+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
76125+ error = -EPERM;
76126+ }
76127+ break;
76128+ case GR_SEGVMOD:
76129+ if (unlikely(!gr_acl_is_enabled())) {
76130+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
76131+ error = -EAGAIN;
76132+ break;
76133+ }
76134+
76135+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
76136+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
76137+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
76138+ struct acl_subject_label *segvacl;
76139+ segvacl =
76140+ lookup_acl_subj_label(gr_usermode->segv_inode,
76141+ gr_usermode->segv_device,
76142+ current->role);
76143+ if (segvacl) {
76144+ segvacl->crashes = 0;
76145+ segvacl->expires = 0;
76146+ }
76147+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
76148+ gr_remove_uid(gr_usermode->segv_uid);
76149+ }
76150+ } else {
76151+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
76152+ error = -EPERM;
76153+ }
76154+ break;
76155+ case GR_SPROLE:
76156+ case GR_SPROLEPAM:
76157+ if (unlikely(!gr_acl_is_enabled())) {
76158+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
76159+ error = -EAGAIN;
76160+ break;
76161+ }
76162+
76163+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
76164+ current->role->expires = 0;
76165+ current->role->auth_attempts = 0;
76166+ }
76167+
76168+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
76169+ time_after(current->role->expires, get_seconds())) {
76170+ error = -EBUSY;
76171+ goto out;
76172+ }
76173+
76174+ if (lookup_special_role_auth
76175+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
76176+ && ((!sprole_salt && !sprole_sum)
76177+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
76178+ char *p = "";
76179+ assign_special_role(gr_usermode->sp_role);
76180+ read_lock(&tasklist_lock);
76181+ if (current->real_parent)
76182+ p = current->real_parent->role->rolename;
76183+ read_unlock(&tasklist_lock);
76184+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
76185+ p, acl_sp_role_value);
76186+ } else {
76187+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
76188+ error = -EPERM;
76189+ if(!(current->role->auth_attempts++))
76190+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76191+
76192+ goto out;
76193+ }
76194+ break;
76195+ case GR_UNSPROLE:
76196+ if (unlikely(!gr_acl_is_enabled())) {
76197+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
76198+ error = -EAGAIN;
76199+ break;
76200+ }
76201+
76202+ if (current->role->roletype & GR_ROLE_SPECIAL) {
76203+ char *p = "";
76204+ int i = 0;
76205+
76206+ read_lock(&tasklist_lock);
76207+ if (current->real_parent) {
76208+ p = current->real_parent->role->rolename;
76209+ i = current->real_parent->acl_role_id;
76210+ }
76211+ read_unlock(&tasklist_lock);
76212+
76213+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
76214+ gr_set_acls(1);
76215+ } else {
76216+ error = -EPERM;
76217+ goto out;
76218+ }
76219+ break;
76220+ default:
76221+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
76222+ error = -EINVAL;
76223+ break;
76224+ }
76225+
76226+ if (error != -EPERM)
76227+ goto out;
76228+
76229+ if(!(gr_auth_attempts++))
76230+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
76231+
76232+ out:
76233+ mutex_unlock(&gr_dev_mutex);
76234+
76235+ if (!error)
76236+ error = req_count;
76237+
76238+ return error;
76239+}
76240+
76241+int
76242+gr_set_acls(const int type)
76243+{
76244+ struct task_struct *task, *task2;
76245+ struct acl_role_label *role = current->role;
76246+ struct acl_subject_label *subj;
76247+ __u16 acl_role_id = current->acl_role_id;
76248+ const struct cred *cred;
76249+ int ret;
76250+
76251+ rcu_read_lock();
76252+ read_lock(&tasklist_lock);
76253+ read_lock(&grsec_exec_file_lock);
76254+ do_each_thread(task2, task) {
76255+ /* check to see if we're called from the exit handler,
76256+ if so, only replace ACLs that have inherited the admin
76257+ ACL */
76258+
76259+ if (type && (task->role != role ||
76260+ task->acl_role_id != acl_role_id))
76261+ continue;
76262+
76263+ task->acl_role_id = 0;
76264+ task->acl_sp_role = 0;
76265+ task->inherited = 0;
76266+
76267+ if (task->exec_file) {
76268+ cred = __task_cred(task);
76269+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
76270+ subj = __gr_get_subject_for_task(polstate, task, NULL);
76271+ if (subj == NULL) {
76272+ ret = -EINVAL;
76273+ read_unlock(&grsec_exec_file_lock);
76274+ read_unlock(&tasklist_lock);
76275+ rcu_read_unlock();
76276+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
76277+ return ret;
76278+ }
76279+ __gr_apply_subject_to_task(polstate, task, subj);
76280+ } else {
76281+ // it's a kernel process
76282+ task->role = polstate->kernel_role;
76283+ task->acl = polstate->kernel_role->root_label;
76284+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
76285+ task->acl->mode &= ~GR_PROCFIND;
76286+#endif
76287+ }
76288+ } while_each_thread(task2, task);
76289+ read_unlock(&grsec_exec_file_lock);
76290+ read_unlock(&tasklist_lock);
76291+ rcu_read_unlock();
76292+
76293+ return 0;
76294+}
76295diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
76296new file mode 100644
76297index 0000000..39645c9
76298--- /dev/null
76299+++ b/grsecurity/gracl_res.c
76300@@ -0,0 +1,68 @@
76301+#include <linux/kernel.h>
76302+#include <linux/sched.h>
76303+#include <linux/gracl.h>
76304+#include <linux/grinternal.h>
76305+
76306+static const char *restab_log[] = {
76307+ [RLIMIT_CPU] = "RLIMIT_CPU",
76308+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
76309+ [RLIMIT_DATA] = "RLIMIT_DATA",
76310+ [RLIMIT_STACK] = "RLIMIT_STACK",
76311+ [RLIMIT_CORE] = "RLIMIT_CORE",
76312+ [RLIMIT_RSS] = "RLIMIT_RSS",
76313+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
76314+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
76315+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
76316+ [RLIMIT_AS] = "RLIMIT_AS",
76317+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
76318+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
76319+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
76320+ [RLIMIT_NICE] = "RLIMIT_NICE",
76321+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
76322+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
76323+ [GR_CRASH_RES] = "RLIMIT_CRASH"
76324+};
76325+
76326+void
76327+gr_log_resource(const struct task_struct *task,
76328+ const int res, const unsigned long wanted, const int gt)
76329+{
76330+ const struct cred *cred;
76331+ unsigned long rlim;
76332+
76333+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
76334+ return;
76335+
76336+ // not yet supported resource
76337+ if (unlikely(!restab_log[res]))
76338+ return;
76339+
76340+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
76341+ rlim = task_rlimit_max(task, res);
76342+ else
76343+ rlim = task_rlimit(task, res);
76344+
76345+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
76346+ return;
76347+
76348+ rcu_read_lock();
76349+ cred = __task_cred(task);
76350+
76351+ if (res == RLIMIT_NPROC &&
76352+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
76353+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
76354+ goto out_rcu_unlock;
76355+ else if (res == RLIMIT_MEMLOCK &&
76356+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
76357+ goto out_rcu_unlock;
76358+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
76359+ goto out_rcu_unlock;
76360+ rcu_read_unlock();
76361+
76362+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
76363+
76364+ return;
76365+out_rcu_unlock:
76366+ rcu_read_unlock();
76367+ return;
76368+}
76369diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
76370new file mode 100644
76371index 0000000..2040e61
76372--- /dev/null
76373+++ b/grsecurity/gracl_segv.c
76374@@ -0,0 +1,313 @@
76375+#include <linux/kernel.h>
76376+#include <linux/mm.h>
76377+#include <asm/uaccess.h>
76378+#include <asm/errno.h>
76379+#include <asm/mman.h>
76380+#include <net/sock.h>
76381+#include <linux/file.h>
76382+#include <linux/fs.h>
76383+#include <linux/net.h>
76384+#include <linux/in.h>
76385+#include <linux/slab.h>
76386+#include <linux/types.h>
76387+#include <linux/sched.h>
76388+#include <linux/timer.h>
76389+#include <linux/gracl.h>
76390+#include <linux/grsecurity.h>
76391+#include <linux/grinternal.h>
76392+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76393+#include <linux/magic.h>
76394+#include <linux/pagemap.h>
76395+#include "../fs/btrfs/async-thread.h"
76396+#include "../fs/btrfs/ctree.h"
76397+#include "../fs/btrfs/btrfs_inode.h"
76398+#endif
76399+
76400+static struct crash_uid *uid_set;
76401+static unsigned short uid_used;
76402+static DEFINE_SPINLOCK(gr_uid_lock);
76403+extern rwlock_t gr_inode_lock;
76404+extern struct acl_subject_label *
76405+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
76406+ struct acl_role_label *role);
76407+
76408+static inline dev_t __get_dev(const struct dentry *dentry)
76409+{
76410+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
76411+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
76412+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
76413+ else
76414+#endif
76415+ return dentry->d_sb->s_dev;
76416+}
76417+
76418+int
76419+gr_init_uidset(void)
76420+{
76421+ uid_set =
76422+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
76423+ uid_used = 0;
76424+
76425+ return uid_set ? 1 : 0;
76426+}
76427+
76428+void
76429+gr_free_uidset(void)
76430+{
76431+ if (uid_set) {
76432+ struct crash_uid *tmpset;
76433+ spin_lock(&gr_uid_lock);
76434+ tmpset = uid_set;
76435+ uid_set = NULL;
76436+ uid_used = 0;
76437+ spin_unlock(&gr_uid_lock);
76438+ if (tmpset)
76439+ kfree(tmpset);
76440+ }
76441+
76442+ return;
76443+}
76444+
76445+int
76446+gr_find_uid(const uid_t uid)
76447+{
76448+ struct crash_uid *tmp = uid_set;
76449+ uid_t buid;
76450+ int low = 0, high = uid_used - 1, mid;
76451+
76452+ while (high >= low) {
76453+ mid = (low + high) >> 1;
76454+ buid = tmp[mid].uid;
76455+ if (buid == uid)
76456+ return mid;
76457+ if (buid > uid)
76458+ high = mid - 1;
76459+ if (buid < uid)
76460+ low = mid + 1;
76461+ }
76462+
76463+ return -1;
76464+}
76465+
76466+static __inline__ void
76467+gr_insertsort(void)
76468+{
76469+ unsigned short i, j;
76470+ struct crash_uid index;
76471+
76472+ for (i = 1; i < uid_used; i++) {
76473+ index = uid_set[i];
76474+ j = i;
76475+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
76476+ uid_set[j] = uid_set[j - 1];
76477+ j--;
76478+ }
76479+ uid_set[j] = index;
76480+ }
76481+
76482+ return;
76483+}
76484+
76485+static __inline__ void
76486+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
76487+{
76488+ int loc;
76489+ uid_t uid = GR_GLOBAL_UID(kuid);
76490+
76491+ if (uid_used == GR_UIDTABLE_MAX)
76492+ return;
76493+
76494+ loc = gr_find_uid(uid);
76495+
76496+ if (loc >= 0) {
76497+ uid_set[loc].expires = expires;
76498+ return;
76499+ }
76500+
76501+ uid_set[uid_used].uid = uid;
76502+ uid_set[uid_used].expires = expires;
76503+ uid_used++;
76504+
76505+ gr_insertsort();
76506+
76507+ return;
76508+}
76509+
76510+void
76511+gr_remove_uid(const unsigned short loc)
76512+{
76513+ unsigned short i;
76514+
76515+ for (i = loc + 1; i < uid_used; i++)
76516+ uid_set[i - 1] = uid_set[i];
76517+
76518+ uid_used--;
76519+
76520+ return;
76521+}
76522+
76523+int
76524+gr_check_crash_uid(const kuid_t kuid)
76525+{
76526+ int loc;
76527+ int ret = 0;
76528+ uid_t uid;
76529+
76530+ if (unlikely(!gr_acl_is_enabled()))
76531+ return 0;
76532+
76533+ uid = GR_GLOBAL_UID(kuid);
76534+
76535+ spin_lock(&gr_uid_lock);
76536+ loc = gr_find_uid(uid);
76537+
76538+ if (loc < 0)
76539+ goto out_unlock;
76540+
76541+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
76542+ gr_remove_uid(loc);
76543+ else
76544+ ret = 1;
76545+
76546+out_unlock:
76547+ spin_unlock(&gr_uid_lock);
76548+ return ret;
76549+}
76550+
76551+static __inline__ int
76552+proc_is_setxid(const struct cred *cred)
76553+{
76554+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
76555+ !uid_eq(cred->uid, cred->fsuid))
76556+ return 1;
76557+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
76558+ !gid_eq(cred->gid, cred->fsgid))
76559+ return 1;
76560+
76561+ return 0;
76562+}
76563+
76564+extern int gr_fake_force_sig(int sig, struct task_struct *t);
76565+
76566+void
76567+gr_handle_crash(struct task_struct *task, const int sig)
76568+{
76569+ struct acl_subject_label *curr;
76570+ struct task_struct *tsk, *tsk2;
76571+ const struct cred *cred;
76572+ const struct cred *cred2;
76573+
76574+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
76575+ return;
76576+
76577+ if (unlikely(!gr_acl_is_enabled()))
76578+ return;
76579+
76580+ curr = task->acl;
76581+
76582+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
76583+ return;
76584+
76585+ if (time_before_eq(curr->expires, get_seconds())) {
76586+ curr->expires = 0;
76587+ curr->crashes = 0;
76588+ }
76589+
76590+ curr->crashes++;
76591+
76592+ if (!curr->expires)
76593+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
76594+
76595+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76596+ time_after(curr->expires, get_seconds())) {
76597+ rcu_read_lock();
76598+ cred = __task_cred(task);
76599+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
76600+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76601+ spin_lock(&gr_uid_lock);
76602+ gr_insert_uid(cred->uid, curr->expires);
76603+ spin_unlock(&gr_uid_lock);
76604+ curr->expires = 0;
76605+ curr->crashes = 0;
76606+ read_lock(&tasklist_lock);
76607+ do_each_thread(tsk2, tsk) {
76608+ cred2 = __task_cred(tsk);
76609+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
76610+ gr_fake_force_sig(SIGKILL, tsk);
76611+ } while_each_thread(tsk2, tsk);
76612+ read_unlock(&tasklist_lock);
76613+ } else {
76614+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
76615+ read_lock(&tasklist_lock);
76616+ read_lock(&grsec_exec_file_lock);
76617+ do_each_thread(tsk2, tsk) {
76618+ if (likely(tsk != task)) {
76619+ // if this thread has the same subject as the one that triggered
76620+ // RES_CRASH and it's the same binary, kill it
76621+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
76622+ gr_fake_force_sig(SIGKILL, tsk);
76623+ }
76624+ } while_each_thread(tsk2, tsk);
76625+ read_unlock(&grsec_exec_file_lock);
76626+ read_unlock(&tasklist_lock);
76627+ }
76628+ rcu_read_unlock();
76629+ }
76630+
76631+ return;
76632+}
76633+
76634+int
76635+gr_check_crash_exec(const struct file *filp)
76636+{
76637+ struct acl_subject_label *curr;
76638+
76639+ if (unlikely(!gr_acl_is_enabled()))
76640+ return 0;
76641+
76642+ read_lock(&gr_inode_lock);
76643+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
76644+ __get_dev(filp->f_path.dentry),
76645+ current->role);
76646+ read_unlock(&gr_inode_lock);
76647+
76648+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
76649+ (!curr->crashes && !curr->expires))
76650+ return 0;
76651+
76652+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
76653+ time_after(curr->expires, get_seconds()))
76654+ return 1;
76655+ else if (time_before_eq(curr->expires, get_seconds())) {
76656+ curr->crashes = 0;
76657+ curr->expires = 0;
76658+ }
76659+
76660+ return 0;
76661+}
76662+
76663+void
76664+gr_handle_alertkill(struct task_struct *task)
76665+{
76666+ struct acl_subject_label *curracl;
76667+ __u32 curr_ip;
76668+ struct task_struct *p, *p2;
76669+
76670+ if (unlikely(!gr_acl_is_enabled()))
76671+ return;
76672+
76673+ curracl = task->acl;
76674+ curr_ip = task->signal->curr_ip;
76675+
76676+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
76677+ read_lock(&tasklist_lock);
76678+ do_each_thread(p2, p) {
76679+ if (p->signal->curr_ip == curr_ip)
76680+ gr_fake_force_sig(SIGKILL, p);
76681+ } while_each_thread(p2, p);
76682+ read_unlock(&tasklist_lock);
76683+ } else if (curracl->mode & GR_KILLPROC)
76684+ gr_fake_force_sig(SIGKILL, task);
76685+
76686+ return;
76687+}
76688diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
76689new file mode 100644
76690index 0000000..6b0c9cc
76691--- /dev/null
76692+++ b/grsecurity/gracl_shm.c
76693@@ -0,0 +1,40 @@
76694+#include <linux/kernel.h>
76695+#include <linux/mm.h>
76696+#include <linux/sched.h>
76697+#include <linux/file.h>
76698+#include <linux/ipc.h>
76699+#include <linux/gracl.h>
76700+#include <linux/grsecurity.h>
76701+#include <linux/grinternal.h>
76702+
76703+int
76704+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76705+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76706+{
76707+ struct task_struct *task;
76708+
76709+ if (!gr_acl_is_enabled())
76710+ return 1;
76711+
76712+ rcu_read_lock();
76713+ read_lock(&tasklist_lock);
76714+
76715+ task = find_task_by_vpid(shm_cprid);
76716+
76717+ if (unlikely(!task))
76718+ task = find_task_by_vpid(shm_lapid);
76719+
76720+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76721+ (task_pid_nr(task) == shm_lapid)) &&
76722+ (task->acl->mode & GR_PROTSHM) &&
76723+ (task->acl != current->acl))) {
76724+ read_unlock(&tasklist_lock);
76725+ rcu_read_unlock();
76726+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76727+ return 0;
76728+ }
76729+ read_unlock(&tasklist_lock);
76730+ rcu_read_unlock();
76731+
76732+ return 1;
76733+}
76734diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76735new file mode 100644
76736index 0000000..bc0be01
76737--- /dev/null
76738+++ b/grsecurity/grsec_chdir.c
76739@@ -0,0 +1,19 @@
76740+#include <linux/kernel.h>
76741+#include <linux/sched.h>
76742+#include <linux/fs.h>
76743+#include <linux/file.h>
76744+#include <linux/grsecurity.h>
76745+#include <linux/grinternal.h>
76746+
76747+void
76748+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76749+{
76750+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76751+ if ((grsec_enable_chdir && grsec_enable_group &&
76752+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76753+ !grsec_enable_group)) {
76754+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76755+ }
76756+#endif
76757+ return;
76758+}
76759diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76760new file mode 100644
76761index 0000000..6d99cec
76762--- /dev/null
76763+++ b/grsecurity/grsec_chroot.c
76764@@ -0,0 +1,385 @@
76765+#include <linux/kernel.h>
76766+#include <linux/module.h>
76767+#include <linux/sched.h>
76768+#include <linux/file.h>
76769+#include <linux/fs.h>
76770+#include <linux/mount.h>
76771+#include <linux/types.h>
76772+#include "../fs/mount.h"
76773+#include <linux/grsecurity.h>
76774+#include <linux/grinternal.h>
76775+
76776+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76777+int gr_init_ran;
76778+#endif
76779+
76780+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76781+{
76782+#ifdef CONFIG_GRKERNSEC
76783+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76784+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76785+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76786+ && gr_init_ran
76787+#endif
76788+ )
76789+ task->gr_is_chrooted = 1;
76790+ else {
76791+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76792+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76793+ gr_init_ran = 1;
76794+#endif
76795+ task->gr_is_chrooted = 0;
76796+ }
76797+
76798+ task->gr_chroot_dentry = path->dentry;
76799+#endif
76800+ return;
76801+}
76802+
76803+void gr_clear_chroot_entries(struct task_struct *task)
76804+{
76805+#ifdef CONFIG_GRKERNSEC
76806+ task->gr_is_chrooted = 0;
76807+ task->gr_chroot_dentry = NULL;
76808+#endif
76809+ return;
76810+}
76811+
76812+int
76813+gr_handle_chroot_unix(const pid_t pid)
76814+{
76815+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76816+ struct task_struct *p;
76817+
76818+ if (unlikely(!grsec_enable_chroot_unix))
76819+ return 1;
76820+
76821+ if (likely(!proc_is_chrooted(current)))
76822+ return 1;
76823+
76824+ rcu_read_lock();
76825+ read_lock(&tasklist_lock);
76826+ p = find_task_by_vpid_unrestricted(pid);
76827+ if (unlikely(p && !have_same_root(current, p))) {
76828+ read_unlock(&tasklist_lock);
76829+ rcu_read_unlock();
76830+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76831+ return 0;
76832+ }
76833+ read_unlock(&tasklist_lock);
76834+ rcu_read_unlock();
76835+#endif
76836+ return 1;
76837+}
76838+
76839+int
76840+gr_handle_chroot_nice(void)
76841+{
76842+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76843+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76844+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76845+ return -EPERM;
76846+ }
76847+#endif
76848+ return 0;
76849+}
76850+
76851+int
76852+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76853+{
76854+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76855+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76856+ && proc_is_chrooted(current)) {
76857+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76858+ return -EACCES;
76859+ }
76860+#endif
76861+ return 0;
76862+}
76863+
76864+int
76865+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76866+{
76867+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76868+ struct task_struct *p;
76869+ int ret = 0;
76870+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76871+ return ret;
76872+
76873+ read_lock(&tasklist_lock);
76874+ do_each_pid_task(pid, type, p) {
76875+ if (!have_same_root(current, p)) {
76876+ ret = 1;
76877+ goto out;
76878+ }
76879+ } while_each_pid_task(pid, type, p);
76880+out:
76881+ read_unlock(&tasklist_lock);
76882+ return ret;
76883+#endif
76884+ return 0;
76885+}
76886+
76887+int
76888+gr_pid_is_chrooted(struct task_struct *p)
76889+{
76890+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76891+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76892+ return 0;
76893+
76894+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76895+ !have_same_root(current, p)) {
76896+ return 1;
76897+ }
76898+#endif
76899+ return 0;
76900+}
76901+
76902+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76903+
76904+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76905+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76906+{
76907+ struct path path, currentroot;
76908+ int ret = 0;
76909+
76910+ path.dentry = (struct dentry *)u_dentry;
76911+ path.mnt = (struct vfsmount *)u_mnt;
76912+ get_fs_root(current->fs, &currentroot);
76913+ if (path_is_under(&path, &currentroot))
76914+ ret = 1;
76915+ path_put(&currentroot);
76916+
76917+ return ret;
76918+}
76919+#endif
76920+
76921+int
76922+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76923+{
76924+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76925+ if (!grsec_enable_chroot_fchdir)
76926+ return 1;
76927+
76928+ if (!proc_is_chrooted(current))
76929+ return 1;
76930+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76931+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76932+ return 0;
76933+ }
76934+#endif
76935+ return 1;
76936+}
76937+
76938+int
76939+gr_chroot_fhandle(void)
76940+{
76941+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76942+ if (!grsec_enable_chroot_fchdir)
76943+ return 1;
76944+
76945+ if (!proc_is_chrooted(current))
76946+ return 1;
76947+ else {
76948+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76949+ return 0;
76950+ }
76951+#endif
76952+ return 1;
76953+}
76954+
76955+int
76956+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76957+ const u64 shm_createtime)
76958+{
76959+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76960+ struct task_struct *p;
76961+
76962+ if (unlikely(!grsec_enable_chroot_shmat))
76963+ return 1;
76964+
76965+ if (likely(!proc_is_chrooted(current)))
76966+ return 1;
76967+
76968+ rcu_read_lock();
76969+ read_lock(&tasklist_lock);
76970+
76971+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76972+ if (time_before_eq64(p->start_time, shm_createtime)) {
76973+ if (have_same_root(current, p)) {
76974+ goto allow;
76975+ } else {
76976+ read_unlock(&tasklist_lock);
76977+ rcu_read_unlock();
76978+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76979+ return 0;
76980+ }
76981+ }
76982+ /* creator exited, pid reuse, fall through to next check */
76983+ }
76984+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76985+ if (unlikely(!have_same_root(current, p))) {
76986+ read_unlock(&tasklist_lock);
76987+ rcu_read_unlock();
76988+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76989+ return 0;
76990+ }
76991+ }
76992+
76993+allow:
76994+ read_unlock(&tasklist_lock);
76995+ rcu_read_unlock();
76996+#endif
76997+ return 1;
76998+}
76999+
77000+void
77001+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
77002+{
77003+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77004+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
77005+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
77006+#endif
77007+ return;
77008+}
77009+
77010+int
77011+gr_handle_chroot_mknod(const struct dentry *dentry,
77012+ const struct vfsmount *mnt, const int mode)
77013+{
77014+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77015+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
77016+ proc_is_chrooted(current)) {
77017+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
77018+ return -EPERM;
77019+ }
77020+#endif
77021+ return 0;
77022+}
77023+
77024+int
77025+gr_handle_chroot_mount(const struct dentry *dentry,
77026+ const struct vfsmount *mnt, const char *dev_name)
77027+{
77028+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77029+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
77030+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
77031+ return -EPERM;
77032+ }
77033+#endif
77034+ return 0;
77035+}
77036+
77037+int
77038+gr_handle_chroot_pivot(void)
77039+{
77040+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77041+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
77042+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
77043+ return -EPERM;
77044+ }
77045+#endif
77046+ return 0;
77047+}
77048+
77049+int
77050+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
77051+{
77052+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77053+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
77054+ !gr_is_outside_chroot(dentry, mnt)) {
77055+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
77056+ return -EPERM;
77057+ }
77058+#endif
77059+ return 0;
77060+}
77061+
77062+extern const char *captab_log[];
77063+extern int captab_log_entries;
77064+
77065+int
77066+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77067+{
77068+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77069+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77070+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77071+ if (cap_raised(chroot_caps, cap)) {
77072+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
77073+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
77074+ }
77075+ return 0;
77076+ }
77077+ }
77078+#endif
77079+ return 1;
77080+}
77081+
77082+int
77083+gr_chroot_is_capable(const int cap)
77084+{
77085+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77086+ return gr_task_chroot_is_capable(current, current_cred(), cap);
77087+#endif
77088+ return 1;
77089+}
77090+
77091+int
77092+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
77093+{
77094+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77095+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
77096+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
77097+ if (cap_raised(chroot_caps, cap)) {
77098+ return 0;
77099+ }
77100+ }
77101+#endif
77102+ return 1;
77103+}
77104+
77105+int
77106+gr_chroot_is_capable_nolog(const int cap)
77107+{
77108+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77109+ return gr_task_chroot_is_capable_nolog(current, cap);
77110+#endif
77111+ return 1;
77112+}
77113+
77114+int
77115+gr_handle_chroot_sysctl(const int op)
77116+{
77117+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77118+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
77119+ proc_is_chrooted(current))
77120+ return -EACCES;
77121+#endif
77122+ return 0;
77123+}
77124+
77125+void
77126+gr_handle_chroot_chdir(const struct path *path)
77127+{
77128+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77129+ if (grsec_enable_chroot_chdir)
77130+ set_fs_pwd(current->fs, path);
77131+#endif
77132+ return;
77133+}
77134+
77135+int
77136+gr_handle_chroot_chmod(const struct dentry *dentry,
77137+ const struct vfsmount *mnt, const int mode)
77138+{
77139+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77140+ /* allow chmod +s on directories, but not files */
77141+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
77142+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
77143+ proc_is_chrooted(current)) {
77144+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
77145+ return -EPERM;
77146+ }
77147+#endif
77148+ return 0;
77149+}
77150diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
77151new file mode 100644
77152index 0000000..0f9ac91
77153--- /dev/null
77154+++ b/grsecurity/grsec_disabled.c
77155@@ -0,0 +1,440 @@
77156+#include <linux/kernel.h>
77157+#include <linux/module.h>
77158+#include <linux/sched.h>
77159+#include <linux/file.h>
77160+#include <linux/fs.h>
77161+#include <linux/kdev_t.h>
77162+#include <linux/net.h>
77163+#include <linux/in.h>
77164+#include <linux/ip.h>
77165+#include <linux/skbuff.h>
77166+#include <linux/sysctl.h>
77167+
77168+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
77169+void
77170+pax_set_initial_flags(struct linux_binprm *bprm)
77171+{
77172+ return;
77173+}
77174+#endif
77175+
77176+#ifdef CONFIG_SYSCTL
77177+__u32
77178+gr_handle_sysctl(const struct ctl_table * table, const int op)
77179+{
77180+ return 0;
77181+}
77182+#endif
77183+
77184+#ifdef CONFIG_TASKSTATS
77185+int gr_is_taskstats_denied(int pid)
77186+{
77187+ return 0;
77188+}
77189+#endif
77190+
77191+int
77192+gr_acl_is_enabled(void)
77193+{
77194+ return 0;
77195+}
77196+
77197+int
77198+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
77199+{
77200+ return 0;
77201+}
77202+
77203+void
77204+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77205+{
77206+ return;
77207+}
77208+
77209+int
77210+gr_handle_rawio(const struct inode *inode)
77211+{
77212+ return 0;
77213+}
77214+
77215+void
77216+gr_acl_handle_psacct(struct task_struct *task, const long code)
77217+{
77218+ return;
77219+}
77220+
77221+int
77222+gr_handle_ptrace(struct task_struct *task, const long request)
77223+{
77224+ return 0;
77225+}
77226+
77227+int
77228+gr_handle_proc_ptrace(struct task_struct *task)
77229+{
77230+ return 0;
77231+}
77232+
77233+int
77234+gr_set_acls(const int type)
77235+{
77236+ return 0;
77237+}
77238+
77239+int
77240+gr_check_hidden_task(const struct task_struct *tsk)
77241+{
77242+ return 0;
77243+}
77244+
77245+int
77246+gr_check_protected_task(const struct task_struct *task)
77247+{
77248+ return 0;
77249+}
77250+
77251+int
77252+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
77253+{
77254+ return 0;
77255+}
77256+
77257+void
77258+gr_copy_label(struct task_struct *tsk)
77259+{
77260+ return;
77261+}
77262+
77263+void
77264+gr_set_pax_flags(struct task_struct *task)
77265+{
77266+ return;
77267+}
77268+
77269+int
77270+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
77271+ const int unsafe_share)
77272+{
77273+ return 0;
77274+}
77275+
77276+void
77277+gr_handle_delete(const ino_t ino, const dev_t dev)
77278+{
77279+ return;
77280+}
77281+
77282+void
77283+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77284+{
77285+ return;
77286+}
77287+
77288+void
77289+gr_handle_crash(struct task_struct *task, const int sig)
77290+{
77291+ return;
77292+}
77293+
77294+int
77295+gr_check_crash_exec(const struct file *filp)
77296+{
77297+ return 0;
77298+}
77299+
77300+int
77301+gr_check_crash_uid(const kuid_t uid)
77302+{
77303+ return 0;
77304+}
77305+
77306+void
77307+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77308+ struct dentry *old_dentry,
77309+ struct dentry *new_dentry,
77310+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
77311+{
77312+ return;
77313+}
77314+
77315+int
77316+gr_search_socket(const int family, const int type, const int protocol)
77317+{
77318+ return 1;
77319+}
77320+
77321+int
77322+gr_search_connectbind(const int mode, const struct socket *sock,
77323+ const struct sockaddr_in *addr)
77324+{
77325+ return 0;
77326+}
77327+
77328+void
77329+gr_handle_alertkill(struct task_struct *task)
77330+{
77331+ return;
77332+}
77333+
77334+__u32
77335+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
77336+{
77337+ return 1;
77338+}
77339+
77340+__u32
77341+gr_acl_handle_hidden_file(const struct dentry * dentry,
77342+ const struct vfsmount * mnt)
77343+{
77344+ return 1;
77345+}
77346+
77347+__u32
77348+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
77349+ int acc_mode)
77350+{
77351+ return 1;
77352+}
77353+
77354+__u32
77355+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
77356+{
77357+ return 1;
77358+}
77359+
77360+__u32
77361+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
77362+{
77363+ return 1;
77364+}
77365+
77366+int
77367+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
77368+ unsigned int *vm_flags)
77369+{
77370+ return 1;
77371+}
77372+
77373+__u32
77374+gr_acl_handle_truncate(const struct dentry * dentry,
77375+ const struct vfsmount * mnt)
77376+{
77377+ return 1;
77378+}
77379+
77380+__u32
77381+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
77382+{
77383+ return 1;
77384+}
77385+
77386+__u32
77387+gr_acl_handle_access(const struct dentry * dentry,
77388+ const struct vfsmount * mnt, const int fmode)
77389+{
77390+ return 1;
77391+}
77392+
77393+__u32
77394+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
77395+ umode_t *mode)
77396+{
77397+ return 1;
77398+}
77399+
77400+__u32
77401+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
77402+{
77403+ return 1;
77404+}
77405+
77406+__u32
77407+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
77408+{
77409+ return 1;
77410+}
77411+
77412+__u32
77413+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
77414+{
77415+ return 1;
77416+}
77417+
77418+void
77419+grsecurity_init(void)
77420+{
77421+ return;
77422+}
77423+
77424+umode_t gr_acl_umask(void)
77425+{
77426+ return 0;
77427+}
77428+
77429+__u32
77430+gr_acl_handle_mknod(const struct dentry * new_dentry,
77431+ const struct dentry * parent_dentry,
77432+ const struct vfsmount * parent_mnt,
77433+ const int mode)
77434+{
77435+ return 1;
77436+}
77437+
77438+__u32
77439+gr_acl_handle_mkdir(const struct dentry * new_dentry,
77440+ const struct dentry * parent_dentry,
77441+ const struct vfsmount * parent_mnt)
77442+{
77443+ return 1;
77444+}
77445+
77446+__u32
77447+gr_acl_handle_symlink(const struct dentry * new_dentry,
77448+ const struct dentry * parent_dentry,
77449+ const struct vfsmount * parent_mnt, const struct filename *from)
77450+{
77451+ return 1;
77452+}
77453+
77454+__u32
77455+gr_acl_handle_link(const struct dentry * new_dentry,
77456+ const struct dentry * parent_dentry,
77457+ const struct vfsmount * parent_mnt,
77458+ const struct dentry * old_dentry,
77459+ const struct vfsmount * old_mnt, const struct filename *to)
77460+{
77461+ return 1;
77462+}
77463+
77464+int
77465+gr_acl_handle_rename(const struct dentry *new_dentry,
77466+ const struct dentry *parent_dentry,
77467+ const struct vfsmount *parent_mnt,
77468+ const struct dentry *old_dentry,
77469+ const struct inode *old_parent_inode,
77470+ const struct vfsmount *old_mnt, const struct filename *newname,
77471+ unsigned int flags)
77472+{
77473+ return 0;
77474+}
77475+
77476+int
77477+gr_acl_handle_filldir(const struct file *file, const char *name,
77478+ const int namelen, const ino_t ino)
77479+{
77480+ return 1;
77481+}
77482+
77483+int
77484+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
77485+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
77486+{
77487+ return 1;
77488+}
77489+
77490+int
77491+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
77492+{
77493+ return 0;
77494+}
77495+
77496+int
77497+gr_search_accept(const struct socket *sock)
77498+{
77499+ return 0;
77500+}
77501+
77502+int
77503+gr_search_listen(const struct socket *sock)
77504+{
77505+ return 0;
77506+}
77507+
77508+int
77509+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
77510+{
77511+ return 0;
77512+}
77513+
77514+__u32
77515+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
77516+{
77517+ return 1;
77518+}
77519+
77520+__u32
77521+gr_acl_handle_creat(const struct dentry * dentry,
77522+ const struct dentry * p_dentry,
77523+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
77524+ const int imode)
77525+{
77526+ return 1;
77527+}
77528+
77529+void
77530+gr_acl_handle_exit(void)
77531+{
77532+ return;
77533+}
77534+
77535+int
77536+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
77537+{
77538+ return 1;
77539+}
77540+
77541+void
77542+gr_set_role_label(const kuid_t uid, const kgid_t gid)
77543+{
77544+ return;
77545+}
77546+
77547+int
77548+gr_acl_handle_procpidmem(const struct task_struct *task)
77549+{
77550+ return 0;
77551+}
77552+
77553+int
77554+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
77555+{
77556+ return 0;
77557+}
77558+
77559+int
77560+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
77561+{
77562+ return 0;
77563+}
77564+
77565+int
77566+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
77567+{
77568+ return 0;
77569+}
77570+
77571+int
77572+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
77573+{
77574+ return 0;
77575+}
77576+
77577+int gr_acl_enable_at_secure(void)
77578+{
77579+ return 0;
77580+}
77581+
77582+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77583+{
77584+ return dentry->d_sb->s_dev;
77585+}
77586+
77587+void gr_put_exec_file(struct task_struct *task)
77588+{
77589+ return;
77590+}
77591+
77592+#ifdef CONFIG_SECURITY
77593+EXPORT_SYMBOL_GPL(gr_check_user_change);
77594+EXPORT_SYMBOL_GPL(gr_check_group_change);
77595+#endif
77596diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
77597new file mode 100644
77598index 0000000..14638ff
77599--- /dev/null
77600+++ b/grsecurity/grsec_exec.c
77601@@ -0,0 +1,188 @@
77602+#include <linux/kernel.h>
77603+#include <linux/sched.h>
77604+#include <linux/file.h>
77605+#include <linux/binfmts.h>
77606+#include <linux/fs.h>
77607+#include <linux/types.h>
77608+#include <linux/grdefs.h>
77609+#include <linux/grsecurity.h>
77610+#include <linux/grinternal.h>
77611+#include <linux/capability.h>
77612+#include <linux/module.h>
77613+#include <linux/compat.h>
77614+
77615+#include <asm/uaccess.h>
77616+
77617+#ifdef CONFIG_GRKERNSEC_EXECLOG
77618+static char gr_exec_arg_buf[132];
77619+static DEFINE_MUTEX(gr_exec_arg_mutex);
77620+#endif
77621+
77622+struct user_arg_ptr {
77623+#ifdef CONFIG_COMPAT
77624+ bool is_compat;
77625+#endif
77626+ union {
77627+ const char __user *const __user *native;
77628+#ifdef CONFIG_COMPAT
77629+ const compat_uptr_t __user *compat;
77630+#endif
77631+ } ptr;
77632+};
77633+
77634+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77635+
77636+void
77637+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77638+{
77639+#ifdef CONFIG_GRKERNSEC_EXECLOG
77640+ char *grarg = gr_exec_arg_buf;
77641+ unsigned int i, x, execlen = 0;
77642+ char c;
77643+
77644+ if (!((grsec_enable_execlog && grsec_enable_group &&
77645+ in_group_p(grsec_audit_gid))
77646+ || (grsec_enable_execlog && !grsec_enable_group)))
77647+ return;
77648+
77649+ mutex_lock(&gr_exec_arg_mutex);
77650+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77651+
77652+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77653+ const char __user *p;
77654+ unsigned int len;
77655+
77656+ p = get_user_arg_ptr(argv, i);
77657+ if (IS_ERR(p))
77658+ goto log;
77659+
77660+ len = strnlen_user(p, 128 - execlen);
77661+ if (len > 128 - execlen)
77662+ len = 128 - execlen;
77663+ else if (len > 0)
77664+ len--;
77665+ if (copy_from_user(grarg + execlen, p, len))
77666+ goto log;
77667+
77668+ /* rewrite unprintable characters */
77669+ for (x = 0; x < len; x++) {
77670+ c = *(grarg + execlen + x);
77671+ if (c < 32 || c > 126)
77672+ *(grarg + execlen + x) = ' ';
77673+ }
77674+
77675+ execlen += len;
77676+ *(grarg + execlen) = ' ';
77677+ *(grarg + execlen + 1) = '\0';
77678+ execlen++;
77679+ }
77680+
77681+ log:
77682+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77683+ bprm->file->f_path.mnt, grarg);
77684+ mutex_unlock(&gr_exec_arg_mutex);
77685+#endif
77686+ return;
77687+}
77688+
77689+#ifdef CONFIG_GRKERNSEC
77690+extern int gr_acl_is_capable(const int cap);
77691+extern int gr_acl_is_capable_nolog(const int cap);
77692+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77693+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77694+extern int gr_chroot_is_capable(const int cap);
77695+extern int gr_chroot_is_capable_nolog(const int cap);
77696+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77697+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77698+#endif
77699+
77700+const char *captab_log[] = {
77701+ "CAP_CHOWN",
77702+ "CAP_DAC_OVERRIDE",
77703+ "CAP_DAC_READ_SEARCH",
77704+ "CAP_FOWNER",
77705+ "CAP_FSETID",
77706+ "CAP_KILL",
77707+ "CAP_SETGID",
77708+ "CAP_SETUID",
77709+ "CAP_SETPCAP",
77710+ "CAP_LINUX_IMMUTABLE",
77711+ "CAP_NET_BIND_SERVICE",
77712+ "CAP_NET_BROADCAST",
77713+ "CAP_NET_ADMIN",
77714+ "CAP_NET_RAW",
77715+ "CAP_IPC_LOCK",
77716+ "CAP_IPC_OWNER",
77717+ "CAP_SYS_MODULE",
77718+ "CAP_SYS_RAWIO",
77719+ "CAP_SYS_CHROOT",
77720+ "CAP_SYS_PTRACE",
77721+ "CAP_SYS_PACCT",
77722+ "CAP_SYS_ADMIN",
77723+ "CAP_SYS_BOOT",
77724+ "CAP_SYS_NICE",
77725+ "CAP_SYS_RESOURCE",
77726+ "CAP_SYS_TIME",
77727+ "CAP_SYS_TTY_CONFIG",
77728+ "CAP_MKNOD",
77729+ "CAP_LEASE",
77730+ "CAP_AUDIT_WRITE",
77731+ "CAP_AUDIT_CONTROL",
77732+ "CAP_SETFCAP",
77733+ "CAP_MAC_OVERRIDE",
77734+ "CAP_MAC_ADMIN",
77735+ "CAP_SYSLOG",
77736+ "CAP_WAKE_ALARM",
77737+ "CAP_BLOCK_SUSPEND"
77738+};
77739+
77740+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77741+
77742+int gr_is_capable(const int cap)
77743+{
77744+#ifdef CONFIG_GRKERNSEC
77745+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77746+ return 1;
77747+ return 0;
77748+#else
77749+ return 1;
77750+#endif
77751+}
77752+
77753+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77754+{
77755+#ifdef CONFIG_GRKERNSEC
77756+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77757+ return 1;
77758+ return 0;
77759+#else
77760+ return 1;
77761+#endif
77762+}
77763+
77764+int gr_is_capable_nolog(const int cap)
77765+{
77766+#ifdef CONFIG_GRKERNSEC
77767+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77768+ return 1;
77769+ return 0;
77770+#else
77771+ return 1;
77772+#endif
77773+}
77774+
77775+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77776+{
77777+#ifdef CONFIG_GRKERNSEC
77778+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77779+ return 1;
77780+ return 0;
77781+#else
77782+ return 1;
77783+#endif
77784+}
77785+
77786+EXPORT_SYMBOL_GPL(gr_is_capable);
77787+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77788+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77789+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
77790diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77791new file mode 100644
77792index 0000000..06cc6ea
77793--- /dev/null
77794+++ b/grsecurity/grsec_fifo.c
77795@@ -0,0 +1,24 @@
77796+#include <linux/kernel.h>
77797+#include <linux/sched.h>
77798+#include <linux/fs.h>
77799+#include <linux/file.h>
77800+#include <linux/grinternal.h>
77801+
77802+int
77803+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77804+ const struct dentry *dir, const int flag, const int acc_mode)
77805+{
77806+#ifdef CONFIG_GRKERNSEC_FIFO
77807+ const struct cred *cred = current_cred();
77808+
77809+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77810+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77811+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77812+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77813+ if (!inode_permission(dentry->d_inode, acc_mode))
77814+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77815+ return -EACCES;
77816+ }
77817+#endif
77818+ return 0;
77819+}
77820diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77821new file mode 100644
77822index 0000000..8ca18bf
77823--- /dev/null
77824+++ b/grsecurity/grsec_fork.c
77825@@ -0,0 +1,23 @@
77826+#include <linux/kernel.h>
77827+#include <linux/sched.h>
77828+#include <linux/grsecurity.h>
77829+#include <linux/grinternal.h>
77830+#include <linux/errno.h>
77831+
77832+void
77833+gr_log_forkfail(const int retval)
77834+{
77835+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77836+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77837+ switch (retval) {
77838+ case -EAGAIN:
77839+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77840+ break;
77841+ case -ENOMEM:
77842+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77843+ break;
77844+ }
77845+ }
77846+#endif
77847+ return;
77848+}
77849diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77850new file mode 100644
77851index 0000000..b7cb191
77852--- /dev/null
77853+++ b/grsecurity/grsec_init.c
77854@@ -0,0 +1,286 @@
77855+#include <linux/kernel.h>
77856+#include <linux/sched.h>
77857+#include <linux/mm.h>
77858+#include <linux/gracl.h>
77859+#include <linux/slab.h>
77860+#include <linux/vmalloc.h>
77861+#include <linux/percpu.h>
77862+#include <linux/module.h>
77863+
77864+int grsec_enable_ptrace_readexec;
77865+int grsec_enable_setxid;
77866+int grsec_enable_symlinkown;
77867+kgid_t grsec_symlinkown_gid;
77868+int grsec_enable_brute;
77869+int grsec_enable_link;
77870+int grsec_enable_dmesg;
77871+int grsec_enable_harden_ptrace;
77872+int grsec_enable_harden_ipc;
77873+int grsec_enable_fifo;
77874+int grsec_enable_execlog;
77875+int grsec_enable_signal;
77876+int grsec_enable_forkfail;
77877+int grsec_enable_audit_ptrace;
77878+int grsec_enable_time;
77879+int grsec_enable_group;
77880+kgid_t grsec_audit_gid;
77881+int grsec_enable_chdir;
77882+int grsec_enable_mount;
77883+int grsec_enable_rofs;
77884+int grsec_deny_new_usb;
77885+int grsec_enable_chroot_findtask;
77886+int grsec_enable_chroot_mount;
77887+int grsec_enable_chroot_shmat;
77888+int grsec_enable_chroot_fchdir;
77889+int grsec_enable_chroot_double;
77890+int grsec_enable_chroot_pivot;
77891+int grsec_enable_chroot_chdir;
77892+int grsec_enable_chroot_chmod;
77893+int grsec_enable_chroot_mknod;
77894+int grsec_enable_chroot_nice;
77895+int grsec_enable_chroot_execlog;
77896+int grsec_enable_chroot_caps;
77897+int grsec_enable_chroot_sysctl;
77898+int grsec_enable_chroot_unix;
77899+int grsec_enable_tpe;
77900+kgid_t grsec_tpe_gid;
77901+int grsec_enable_blackhole;
77902+#ifdef CONFIG_IPV6_MODULE
77903+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77904+#endif
77905+int grsec_lastack_retries;
77906+int grsec_enable_tpe_all;
77907+int grsec_enable_tpe_invert;
77908+int grsec_enable_socket_all;
77909+kgid_t grsec_socket_all_gid;
77910+int grsec_enable_socket_client;
77911+kgid_t grsec_socket_client_gid;
77912+int grsec_enable_socket_server;
77913+kgid_t grsec_socket_server_gid;
77914+int grsec_resource_logging;
77915+int grsec_disable_privio;
77916+int grsec_enable_log_rwxmaps;
77917+int grsec_lock;
77918+
77919+DEFINE_SPINLOCK(grsec_alert_lock);
77920+unsigned long grsec_alert_wtime = 0;
77921+unsigned long grsec_alert_fyet = 0;
77922+
77923+DEFINE_SPINLOCK(grsec_audit_lock);
77924+
77925+DEFINE_RWLOCK(grsec_exec_file_lock);
77926+
77927+char *gr_shared_page[4];
77928+
77929+char *gr_alert_log_fmt;
77930+char *gr_audit_log_fmt;
77931+char *gr_alert_log_buf;
77932+char *gr_audit_log_buf;
77933+
77934+extern struct gr_arg *gr_usermode;
77935+extern unsigned char *gr_system_salt;
77936+extern unsigned char *gr_system_sum;
77937+
77938+void __init
77939+grsecurity_init(void)
77940+{
77941+ int j;
77942+ /* create the per-cpu shared pages */
77943+
77944+#ifdef CONFIG_X86
77945+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77946+#endif
77947+
77948+ for (j = 0; j < 4; j++) {
77949+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77950+ if (gr_shared_page[j] == NULL) {
77951+ panic("Unable to allocate grsecurity shared page");
77952+ return;
77953+ }
77954+ }
77955+
77956+ /* allocate log buffers */
77957+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77958+ if (!gr_alert_log_fmt) {
77959+ panic("Unable to allocate grsecurity alert log format buffer");
77960+ return;
77961+ }
77962+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77963+ if (!gr_audit_log_fmt) {
77964+ panic("Unable to allocate grsecurity audit log format buffer");
77965+ return;
77966+ }
77967+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77968+ if (!gr_alert_log_buf) {
77969+ panic("Unable to allocate grsecurity alert log buffer");
77970+ return;
77971+ }
77972+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77973+ if (!gr_audit_log_buf) {
77974+ panic("Unable to allocate grsecurity audit log buffer");
77975+ return;
77976+ }
77977+
77978+ /* allocate memory for authentication structure */
77979+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77980+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77981+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77982+
77983+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77984+ panic("Unable to allocate grsecurity authentication structure");
77985+ return;
77986+ }
77987+
77988+#ifdef CONFIG_GRKERNSEC_IO
77989+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77990+ grsec_disable_privio = 1;
77991+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77992+ grsec_disable_privio = 1;
77993+#else
77994+ grsec_disable_privio = 0;
77995+#endif
77996+#endif
77997+
77998+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77999+ /* for backward compatibility, tpe_invert always defaults to on if
78000+ enabled in the kernel
78001+ */
78002+ grsec_enable_tpe_invert = 1;
78003+#endif
78004+
78005+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
78006+#ifndef CONFIG_GRKERNSEC_SYSCTL
78007+ grsec_lock = 1;
78008+#endif
78009+
78010+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78011+ grsec_enable_log_rwxmaps = 1;
78012+#endif
78013+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78014+ grsec_enable_group = 1;
78015+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
78016+#endif
78017+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78018+ grsec_enable_ptrace_readexec = 1;
78019+#endif
78020+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78021+ grsec_enable_chdir = 1;
78022+#endif
78023+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78024+ grsec_enable_harden_ptrace = 1;
78025+#endif
78026+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78027+ grsec_enable_harden_ipc = 1;
78028+#endif
78029+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78030+ grsec_enable_mount = 1;
78031+#endif
78032+#ifdef CONFIG_GRKERNSEC_LINK
78033+ grsec_enable_link = 1;
78034+#endif
78035+#ifdef CONFIG_GRKERNSEC_BRUTE
78036+ grsec_enable_brute = 1;
78037+#endif
78038+#ifdef CONFIG_GRKERNSEC_DMESG
78039+ grsec_enable_dmesg = 1;
78040+#endif
78041+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78042+ grsec_enable_blackhole = 1;
78043+ grsec_lastack_retries = 4;
78044+#endif
78045+#ifdef CONFIG_GRKERNSEC_FIFO
78046+ grsec_enable_fifo = 1;
78047+#endif
78048+#ifdef CONFIG_GRKERNSEC_EXECLOG
78049+ grsec_enable_execlog = 1;
78050+#endif
78051+#ifdef CONFIG_GRKERNSEC_SETXID
78052+ grsec_enable_setxid = 1;
78053+#endif
78054+#ifdef CONFIG_GRKERNSEC_SIGNAL
78055+ grsec_enable_signal = 1;
78056+#endif
78057+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78058+ grsec_enable_forkfail = 1;
78059+#endif
78060+#ifdef CONFIG_GRKERNSEC_TIME
78061+ grsec_enable_time = 1;
78062+#endif
78063+#ifdef CONFIG_GRKERNSEC_RESLOG
78064+ grsec_resource_logging = 1;
78065+#endif
78066+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78067+ grsec_enable_chroot_findtask = 1;
78068+#endif
78069+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78070+ grsec_enable_chroot_unix = 1;
78071+#endif
78072+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78073+ grsec_enable_chroot_mount = 1;
78074+#endif
78075+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78076+ grsec_enable_chroot_fchdir = 1;
78077+#endif
78078+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78079+ grsec_enable_chroot_shmat = 1;
78080+#endif
78081+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78082+ grsec_enable_audit_ptrace = 1;
78083+#endif
78084+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78085+ grsec_enable_chroot_double = 1;
78086+#endif
78087+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78088+ grsec_enable_chroot_pivot = 1;
78089+#endif
78090+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78091+ grsec_enable_chroot_chdir = 1;
78092+#endif
78093+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78094+ grsec_enable_chroot_chmod = 1;
78095+#endif
78096+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78097+ grsec_enable_chroot_mknod = 1;
78098+#endif
78099+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78100+ grsec_enable_chroot_nice = 1;
78101+#endif
78102+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78103+ grsec_enable_chroot_execlog = 1;
78104+#endif
78105+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78106+ grsec_enable_chroot_caps = 1;
78107+#endif
78108+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78109+ grsec_enable_chroot_sysctl = 1;
78110+#endif
78111+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78112+ grsec_enable_symlinkown = 1;
78113+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
78114+#endif
78115+#ifdef CONFIG_GRKERNSEC_TPE
78116+ grsec_enable_tpe = 1;
78117+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
78118+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78119+ grsec_enable_tpe_all = 1;
78120+#endif
78121+#endif
78122+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78123+ grsec_enable_socket_all = 1;
78124+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
78125+#endif
78126+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78127+ grsec_enable_socket_client = 1;
78128+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
78129+#endif
78130+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78131+ grsec_enable_socket_server = 1;
78132+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
78133+#endif
78134+#endif
78135+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
78136+ grsec_deny_new_usb = 1;
78137+#endif
78138+
78139+ return;
78140+}
78141diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
78142new file mode 100644
78143index 0000000..1773300
78144--- /dev/null
78145+++ b/grsecurity/grsec_ipc.c
78146@@ -0,0 +1,48 @@
78147+#include <linux/kernel.h>
78148+#include <linux/mm.h>
78149+#include <linux/sched.h>
78150+#include <linux/file.h>
78151+#include <linux/ipc.h>
78152+#include <linux/ipc_namespace.h>
78153+#include <linux/grsecurity.h>
78154+#include <linux/grinternal.h>
78155+
78156+int
78157+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
78158+{
78159+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
78160+ int write;
78161+ int orig_granted_mode;
78162+ kuid_t euid;
78163+ kgid_t egid;
78164+
78165+ if (!grsec_enable_harden_ipc)
78166+ return 1;
78167+
78168+ euid = current_euid();
78169+ egid = current_egid();
78170+
78171+ write = requested_mode & 00002;
78172+ orig_granted_mode = ipcp->mode;
78173+
78174+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
78175+ orig_granted_mode >>= 6;
78176+ else {
78177+ /* if likely wrong permissions, lock to user */
78178+ if (orig_granted_mode & 0007)
78179+ orig_granted_mode = 0;
78180+ /* otherwise do a egid-only check */
78181+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
78182+ orig_granted_mode >>= 3;
78183+ /* otherwise, no access */
78184+ else
78185+ orig_granted_mode = 0;
78186+ }
78187+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
78188+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
78189+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
78190+ return 0;
78191+ }
78192+#endif
78193+ return 1;
78194+}
78195diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
78196new file mode 100644
78197index 0000000..5e05e20
78198--- /dev/null
78199+++ b/grsecurity/grsec_link.c
78200@@ -0,0 +1,58 @@
78201+#include <linux/kernel.h>
78202+#include <linux/sched.h>
78203+#include <linux/fs.h>
78204+#include <linux/file.h>
78205+#include <linux/grinternal.h>
78206+
78207+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
78208+{
78209+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78210+ const struct inode *link_inode = link->dentry->d_inode;
78211+
78212+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
78213+ /* ignore root-owned links, e.g. /proc/self */
78214+ gr_is_global_nonroot(link_inode->i_uid) && target &&
78215+ !uid_eq(link_inode->i_uid, target->i_uid)) {
78216+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
78217+ return 1;
78218+ }
78219+#endif
78220+ return 0;
78221+}
78222+
78223+int
78224+gr_handle_follow_link(const struct inode *parent,
78225+ const struct inode *inode,
78226+ const struct dentry *dentry, const struct vfsmount *mnt)
78227+{
78228+#ifdef CONFIG_GRKERNSEC_LINK
78229+ const struct cred *cred = current_cred();
78230+
78231+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
78232+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
78233+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
78234+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
78235+ return -EACCES;
78236+ }
78237+#endif
78238+ return 0;
78239+}
78240+
78241+int
78242+gr_handle_hardlink(const struct dentry *dentry,
78243+ const struct vfsmount *mnt,
78244+ struct inode *inode, const int mode, const struct filename *to)
78245+{
78246+#ifdef CONFIG_GRKERNSEC_LINK
78247+ const struct cred *cred = current_cred();
78248+
78249+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
78250+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
78251+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
78252+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
78253+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
78254+ return -EPERM;
78255+ }
78256+#endif
78257+ return 0;
78258+}
78259diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
78260new file mode 100644
78261index 0000000..dbe0a6b
78262--- /dev/null
78263+++ b/grsecurity/grsec_log.c
78264@@ -0,0 +1,341 @@
78265+#include <linux/kernel.h>
78266+#include <linux/sched.h>
78267+#include <linux/file.h>
78268+#include <linux/tty.h>
78269+#include <linux/fs.h>
78270+#include <linux/mm.h>
78271+#include <linux/grinternal.h>
78272+
78273+#ifdef CONFIG_TREE_PREEMPT_RCU
78274+#define DISABLE_PREEMPT() preempt_disable()
78275+#define ENABLE_PREEMPT() preempt_enable()
78276+#else
78277+#define DISABLE_PREEMPT()
78278+#define ENABLE_PREEMPT()
78279+#endif
78280+
78281+#define BEGIN_LOCKS(x) \
78282+ DISABLE_PREEMPT(); \
78283+ rcu_read_lock(); \
78284+ read_lock(&tasklist_lock); \
78285+ read_lock(&grsec_exec_file_lock); \
78286+ if (x != GR_DO_AUDIT) \
78287+ spin_lock(&grsec_alert_lock); \
78288+ else \
78289+ spin_lock(&grsec_audit_lock)
78290+
78291+#define END_LOCKS(x) \
78292+ if (x != GR_DO_AUDIT) \
78293+ spin_unlock(&grsec_alert_lock); \
78294+ else \
78295+ spin_unlock(&grsec_audit_lock); \
78296+ read_unlock(&grsec_exec_file_lock); \
78297+ read_unlock(&tasklist_lock); \
78298+ rcu_read_unlock(); \
78299+ ENABLE_PREEMPT(); \
78300+ if (x == GR_DONT_AUDIT) \
78301+ gr_handle_alertkill(current)
78302+
78303+enum {
78304+ FLOODING,
78305+ NO_FLOODING
78306+};
78307+
78308+extern char *gr_alert_log_fmt;
78309+extern char *gr_audit_log_fmt;
78310+extern char *gr_alert_log_buf;
78311+extern char *gr_audit_log_buf;
78312+
78313+static int gr_log_start(int audit)
78314+{
78315+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
78316+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
78317+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78318+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
78319+ unsigned long curr_secs = get_seconds();
78320+
78321+ if (audit == GR_DO_AUDIT)
78322+ goto set_fmt;
78323+
78324+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
78325+ grsec_alert_wtime = curr_secs;
78326+ grsec_alert_fyet = 0;
78327+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
78328+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
78329+ grsec_alert_fyet++;
78330+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
78331+ grsec_alert_wtime = curr_secs;
78332+ grsec_alert_fyet++;
78333+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
78334+ return FLOODING;
78335+ }
78336+ else return FLOODING;
78337+
78338+set_fmt:
78339+#endif
78340+ memset(buf, 0, PAGE_SIZE);
78341+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
78342+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
78343+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78344+ } else if (current->signal->curr_ip) {
78345+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
78346+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
78347+ } else if (gr_acl_is_enabled()) {
78348+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
78349+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
78350+ } else {
78351+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
78352+ strcpy(buf, fmt);
78353+ }
78354+
78355+ return NO_FLOODING;
78356+}
78357+
78358+static void gr_log_middle(int audit, const char *msg, va_list ap)
78359+ __attribute__ ((format (printf, 2, 0)));
78360+
78361+static void gr_log_middle(int audit, const char *msg, va_list ap)
78362+{
78363+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78364+ unsigned int len = strlen(buf);
78365+
78366+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78367+
78368+ return;
78369+}
78370+
78371+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78372+ __attribute__ ((format (printf, 2, 3)));
78373+
78374+static void gr_log_middle_varargs(int audit, const char *msg, ...)
78375+{
78376+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78377+ unsigned int len = strlen(buf);
78378+ va_list ap;
78379+
78380+ va_start(ap, msg);
78381+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
78382+ va_end(ap);
78383+
78384+ return;
78385+}
78386+
78387+static void gr_log_end(int audit, int append_default)
78388+{
78389+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
78390+ if (append_default) {
78391+ struct task_struct *task = current;
78392+ struct task_struct *parent = task->real_parent;
78393+ const struct cred *cred = __task_cred(task);
78394+ const struct cred *pcred = __task_cred(parent);
78395+ unsigned int len = strlen(buf);
78396+
78397+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78398+ }
78399+
78400+ printk("%s\n", buf);
78401+
78402+ return;
78403+}
78404+
78405+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
78406+{
78407+ int logtype;
78408+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
78409+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
78410+ void *voidptr = NULL;
78411+ int num1 = 0, num2 = 0;
78412+ unsigned long ulong1 = 0, ulong2 = 0;
78413+ struct dentry *dentry = NULL;
78414+ struct vfsmount *mnt = NULL;
78415+ struct file *file = NULL;
78416+ struct task_struct *task = NULL;
78417+ struct vm_area_struct *vma = NULL;
78418+ const struct cred *cred, *pcred;
78419+ va_list ap;
78420+
78421+ BEGIN_LOCKS(audit);
78422+ logtype = gr_log_start(audit);
78423+ if (logtype == FLOODING) {
78424+ END_LOCKS(audit);
78425+ return;
78426+ }
78427+ va_start(ap, argtypes);
78428+ switch (argtypes) {
78429+ case GR_TTYSNIFF:
78430+ task = va_arg(ap, struct task_struct *);
78431+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
78432+ break;
78433+ case GR_SYSCTL_HIDDEN:
78434+ str1 = va_arg(ap, char *);
78435+ gr_log_middle_varargs(audit, msg, result, str1);
78436+ break;
78437+ case GR_RBAC:
78438+ dentry = va_arg(ap, struct dentry *);
78439+ mnt = va_arg(ap, struct vfsmount *);
78440+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
78441+ break;
78442+ case GR_RBAC_STR:
78443+ dentry = va_arg(ap, struct dentry *);
78444+ mnt = va_arg(ap, struct vfsmount *);
78445+ str1 = va_arg(ap, char *);
78446+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
78447+ break;
78448+ case GR_STR_RBAC:
78449+ str1 = va_arg(ap, char *);
78450+ dentry = va_arg(ap, struct dentry *);
78451+ mnt = va_arg(ap, struct vfsmount *);
78452+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
78453+ break;
78454+ case GR_RBAC_MODE2:
78455+ dentry = va_arg(ap, struct dentry *);
78456+ mnt = va_arg(ap, struct vfsmount *);
78457+ str1 = va_arg(ap, char *);
78458+ str2 = va_arg(ap, char *);
78459+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
78460+ break;
78461+ case GR_RBAC_MODE3:
78462+ dentry = va_arg(ap, struct dentry *);
78463+ mnt = va_arg(ap, struct vfsmount *);
78464+ str1 = va_arg(ap, char *);
78465+ str2 = va_arg(ap, char *);
78466+ str3 = va_arg(ap, char *);
78467+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
78468+ break;
78469+ case GR_FILENAME:
78470+ dentry = va_arg(ap, struct dentry *);
78471+ mnt = va_arg(ap, struct vfsmount *);
78472+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
78473+ break;
78474+ case GR_STR_FILENAME:
78475+ str1 = va_arg(ap, char *);
78476+ dentry = va_arg(ap, struct dentry *);
78477+ mnt = va_arg(ap, struct vfsmount *);
78478+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
78479+ break;
78480+ case GR_FILENAME_STR:
78481+ dentry = va_arg(ap, struct dentry *);
78482+ mnt = va_arg(ap, struct vfsmount *);
78483+ str1 = va_arg(ap, char *);
78484+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
78485+ break;
78486+ case GR_FILENAME_TWO_INT:
78487+ dentry = va_arg(ap, struct dentry *);
78488+ mnt = va_arg(ap, struct vfsmount *);
78489+ num1 = va_arg(ap, int);
78490+ num2 = va_arg(ap, int);
78491+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
78492+ break;
78493+ case GR_FILENAME_TWO_INT_STR:
78494+ dentry = va_arg(ap, struct dentry *);
78495+ mnt = va_arg(ap, struct vfsmount *);
78496+ num1 = va_arg(ap, int);
78497+ num2 = va_arg(ap, int);
78498+ str1 = va_arg(ap, char *);
78499+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
78500+ break;
78501+ case GR_TEXTREL:
78502+ file = va_arg(ap, struct file *);
78503+ ulong1 = va_arg(ap, unsigned long);
78504+ ulong2 = va_arg(ap, unsigned long);
78505+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
78506+ break;
78507+ case GR_PTRACE:
78508+ task = va_arg(ap, struct task_struct *);
78509+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
78510+ break;
78511+ case GR_RESOURCE:
78512+ task = va_arg(ap, struct task_struct *);
78513+ cred = __task_cred(task);
78514+ pcred = __task_cred(task->real_parent);
78515+ ulong1 = va_arg(ap, unsigned long);
78516+ str1 = va_arg(ap, char *);
78517+ ulong2 = va_arg(ap, unsigned long);
78518+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78519+ break;
78520+ case GR_CAP:
78521+ task = va_arg(ap, struct task_struct *);
78522+ cred = __task_cred(task);
78523+ pcred = __task_cred(task->real_parent);
78524+ str1 = va_arg(ap, char *);
78525+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78526+ break;
78527+ case GR_SIG:
78528+ str1 = va_arg(ap, char *);
78529+ voidptr = va_arg(ap, void *);
78530+ gr_log_middle_varargs(audit, msg, str1, voidptr);
78531+ break;
78532+ case GR_SIG2:
78533+ task = va_arg(ap, struct task_struct *);
78534+ cred = __task_cred(task);
78535+ pcred = __task_cred(task->real_parent);
78536+ num1 = va_arg(ap, int);
78537+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78538+ break;
78539+ case GR_CRASH1:
78540+ task = va_arg(ap, struct task_struct *);
78541+ cred = __task_cred(task);
78542+ pcred = __task_cred(task->real_parent);
78543+ ulong1 = va_arg(ap, unsigned long);
78544+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
78545+ break;
78546+ case GR_CRASH2:
78547+ task = va_arg(ap, struct task_struct *);
78548+ cred = __task_cred(task);
78549+ pcred = __task_cred(task->real_parent);
78550+ ulong1 = va_arg(ap, unsigned long);
78551+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
78552+ break;
78553+ case GR_RWXMAP:
78554+ file = va_arg(ap, struct file *);
78555+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
78556+ break;
78557+ case GR_RWXMAPVMA:
78558+ vma = va_arg(ap, struct vm_area_struct *);
78559+ if (vma->vm_file)
78560+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
78561+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
78562+ str1 = "<stack>";
78563+ else if (vma->vm_start <= current->mm->brk &&
78564+ vma->vm_end >= current->mm->start_brk)
78565+ str1 = "<heap>";
78566+ else
78567+ str1 = "<anonymous mapping>";
78568+ gr_log_middle_varargs(audit, msg, str1);
78569+ break;
78570+ case GR_PSACCT:
78571+ {
78572+ unsigned int wday, cday;
78573+ __u8 whr, chr;
78574+ __u8 wmin, cmin;
78575+ __u8 wsec, csec;
78576+ char cur_tty[64] = { 0 };
78577+ char parent_tty[64] = { 0 };
78578+
78579+ task = va_arg(ap, struct task_struct *);
78580+ wday = va_arg(ap, unsigned int);
78581+ cday = va_arg(ap, unsigned int);
78582+ whr = va_arg(ap, int);
78583+ chr = va_arg(ap, int);
78584+ wmin = va_arg(ap, int);
78585+ cmin = va_arg(ap, int);
78586+ wsec = va_arg(ap, int);
78587+ csec = va_arg(ap, int);
78588+ ulong1 = va_arg(ap, unsigned long);
78589+ cred = __task_cred(task);
78590+ pcred = __task_cred(task->real_parent);
78591+
78592+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
78593+ }
78594+ break;
78595+ default:
78596+ gr_log_middle(audit, msg, ap);
78597+ }
78598+ va_end(ap);
78599+ // these don't need DEFAULTSECARGS printed on the end
78600+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
78601+ gr_log_end(audit, 0);
78602+ else
78603+ gr_log_end(audit, 1);
78604+ END_LOCKS(audit);
78605+}
78606diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
78607new file mode 100644
78608index 0000000..0e39d8c
78609--- /dev/null
78610+++ b/grsecurity/grsec_mem.c
78611@@ -0,0 +1,48 @@
78612+#include <linux/kernel.h>
78613+#include <linux/sched.h>
78614+#include <linux/mm.h>
78615+#include <linux/mman.h>
78616+#include <linux/module.h>
78617+#include <linux/grinternal.h>
78618+
78619+void gr_handle_msr_write(void)
78620+{
78621+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78622+ return;
78623+}
78624+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78625+
78626+void
78627+gr_handle_ioperm(void)
78628+{
78629+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78630+ return;
78631+}
78632+
78633+void
78634+gr_handle_iopl(void)
78635+{
78636+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78637+ return;
78638+}
78639+
78640+void
78641+gr_handle_mem_readwrite(u64 from, u64 to)
78642+{
78643+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78644+ return;
78645+}
78646+
78647+void
78648+gr_handle_vm86(void)
78649+{
78650+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78651+ return;
78652+}
78653+
78654+void
78655+gr_log_badprocpid(const char *entry)
78656+{
78657+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78658+ return;
78659+}
78660diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78661new file mode 100644
78662index 0000000..cd9e124
78663--- /dev/null
78664+++ b/grsecurity/grsec_mount.c
78665@@ -0,0 +1,65 @@
78666+#include <linux/kernel.h>
78667+#include <linux/sched.h>
78668+#include <linux/mount.h>
78669+#include <linux/major.h>
78670+#include <linux/grsecurity.h>
78671+#include <linux/grinternal.h>
78672+
78673+void
78674+gr_log_remount(const char *devname, const int retval)
78675+{
78676+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78677+ if (grsec_enable_mount && (retval >= 0))
78678+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78679+#endif
78680+ return;
78681+}
78682+
78683+void
78684+gr_log_unmount(const char *devname, const int retval)
78685+{
78686+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78687+ if (grsec_enable_mount && (retval >= 0))
78688+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78689+#endif
78690+ return;
78691+}
78692+
78693+void
78694+gr_log_mount(const char *from, const char *to, const int retval)
78695+{
78696+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78697+ if (grsec_enable_mount && (retval >= 0))
78698+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
78699+#endif
78700+ return;
78701+}
78702+
78703+int
78704+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78705+{
78706+#ifdef CONFIG_GRKERNSEC_ROFS
78707+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78708+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78709+ return -EPERM;
78710+ } else
78711+ return 0;
78712+#endif
78713+ return 0;
78714+}
78715+
78716+int
78717+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78718+{
78719+#ifdef CONFIG_GRKERNSEC_ROFS
78720+ struct inode *inode = dentry->d_inode;
78721+
78722+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78723+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78724+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78725+ return -EPERM;
78726+ } else
78727+ return 0;
78728+#endif
78729+ return 0;
78730+}
78731diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78732new file mode 100644
78733index 0000000..6ee9d50
78734--- /dev/null
78735+++ b/grsecurity/grsec_pax.c
78736@@ -0,0 +1,45 @@
78737+#include <linux/kernel.h>
78738+#include <linux/sched.h>
78739+#include <linux/mm.h>
78740+#include <linux/file.h>
78741+#include <linux/grinternal.h>
78742+#include <linux/grsecurity.h>
78743+
78744+void
78745+gr_log_textrel(struct vm_area_struct * vma)
78746+{
78747+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78748+ if (grsec_enable_log_rwxmaps)
78749+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78750+#endif
78751+ return;
78752+}
78753+
78754+void gr_log_ptgnustack(struct file *file)
78755+{
78756+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78757+ if (grsec_enable_log_rwxmaps)
78758+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78759+#endif
78760+ return;
78761+}
78762+
78763+void
78764+gr_log_rwxmmap(struct file *file)
78765+{
78766+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78767+ if (grsec_enable_log_rwxmaps)
78768+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78769+#endif
78770+ return;
78771+}
78772+
78773+void
78774+gr_log_rwxmprotect(struct vm_area_struct *vma)
78775+{
78776+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78777+ if (grsec_enable_log_rwxmaps)
78778+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78779+#endif
78780+ return;
78781+}
78782diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78783new file mode 100644
78784index 0000000..2005a3a
78785--- /dev/null
78786+++ b/grsecurity/grsec_proc.c
78787@@ -0,0 +1,20 @@
78788+#include <linux/kernel.h>
78789+#include <linux/sched.h>
78790+#include <linux/grsecurity.h>
78791+#include <linux/grinternal.h>
78792+
78793+int gr_proc_is_restricted(void)
78794+{
78795+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78796+ const struct cred *cred = current_cred();
78797+#endif
78798+
78799+#ifdef CONFIG_GRKERNSEC_PROC_USER
78800+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78801+ return -EACCES;
78802+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78803+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78804+ return -EACCES;
78805+#endif
78806+ return 0;
78807+}
78808diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78809new file mode 100644
78810index 0000000..f7f29aa
78811--- /dev/null
78812+++ b/grsecurity/grsec_ptrace.c
78813@@ -0,0 +1,30 @@
78814+#include <linux/kernel.h>
78815+#include <linux/sched.h>
78816+#include <linux/grinternal.h>
78817+#include <linux/security.h>
78818+
78819+void
78820+gr_audit_ptrace(struct task_struct *task)
78821+{
78822+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78823+ if (grsec_enable_audit_ptrace)
78824+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78825+#endif
78826+ return;
78827+}
78828+
78829+int
78830+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78831+{
78832+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78833+ const struct dentry *dentry = file->f_path.dentry;
78834+ const struct vfsmount *mnt = file->f_path.mnt;
78835+
78836+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78837+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78838+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78839+ return -EACCES;
78840+ }
78841+#endif
78842+ return 0;
78843+}
78844diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78845new file mode 100644
78846index 0000000..3860c7e
78847--- /dev/null
78848+++ b/grsecurity/grsec_sig.c
78849@@ -0,0 +1,236 @@
78850+#include <linux/kernel.h>
78851+#include <linux/sched.h>
78852+#include <linux/fs.h>
78853+#include <linux/delay.h>
78854+#include <linux/grsecurity.h>
78855+#include <linux/grinternal.h>
78856+#include <linux/hardirq.h>
78857+
78858+char *signames[] = {
78859+ [SIGSEGV] = "Segmentation fault",
78860+ [SIGILL] = "Illegal instruction",
78861+ [SIGABRT] = "Abort",
78862+ [SIGBUS] = "Invalid alignment/Bus error"
78863+};
78864+
78865+void
78866+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78867+{
78868+#ifdef CONFIG_GRKERNSEC_SIGNAL
78869+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78870+ (sig == SIGABRT) || (sig == SIGBUS))) {
78871+ if (task_pid_nr(t) == task_pid_nr(current)) {
78872+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78873+ } else {
78874+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78875+ }
78876+ }
78877+#endif
78878+ return;
78879+}
78880+
78881+int
78882+gr_handle_signal(const struct task_struct *p, const int sig)
78883+{
78884+#ifdef CONFIG_GRKERNSEC
78885+ /* ignore the 0 signal for protected task checks */
78886+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78887+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78888+ return -EPERM;
78889+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78890+ return -EPERM;
78891+ }
78892+#endif
78893+ return 0;
78894+}
78895+
78896+#ifdef CONFIG_GRKERNSEC
78897+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78898+
78899+int gr_fake_force_sig(int sig, struct task_struct *t)
78900+{
78901+ unsigned long int flags;
78902+ int ret, blocked, ignored;
78903+ struct k_sigaction *action;
78904+
78905+ spin_lock_irqsave(&t->sighand->siglock, flags);
78906+ action = &t->sighand->action[sig-1];
78907+ ignored = action->sa.sa_handler == SIG_IGN;
78908+ blocked = sigismember(&t->blocked, sig);
78909+ if (blocked || ignored) {
78910+ action->sa.sa_handler = SIG_DFL;
78911+ if (blocked) {
78912+ sigdelset(&t->blocked, sig);
78913+ recalc_sigpending_and_wake(t);
78914+ }
78915+ }
78916+ if (action->sa.sa_handler == SIG_DFL)
78917+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78918+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78919+
78920+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78921+
78922+ return ret;
78923+}
78924+#endif
78925+
78926+#define GR_USER_BAN_TIME (15 * 60)
78927+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78928+
78929+void gr_handle_brute_attach(int dumpable)
78930+{
78931+#ifdef CONFIG_GRKERNSEC_BRUTE
78932+ struct task_struct *p = current;
78933+ kuid_t uid = GLOBAL_ROOT_UID;
78934+ int daemon = 0;
78935+
78936+ if (!grsec_enable_brute)
78937+ return;
78938+
78939+ rcu_read_lock();
78940+ read_lock(&tasklist_lock);
78941+ read_lock(&grsec_exec_file_lock);
78942+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78943+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78944+ p->real_parent->brute = 1;
78945+ daemon = 1;
78946+ } else {
78947+ const struct cred *cred = __task_cred(p), *cred2;
78948+ struct task_struct *tsk, *tsk2;
78949+
78950+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78951+ struct user_struct *user;
78952+
78953+ uid = cred->uid;
78954+
78955+ /* this is put upon execution past expiration */
78956+ user = find_user(uid);
78957+ if (user == NULL)
78958+ goto unlock;
78959+ user->suid_banned = 1;
78960+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78961+ if (user->suid_ban_expires == ~0UL)
78962+ user->suid_ban_expires--;
78963+
78964+ /* only kill other threads of the same binary, from the same user */
78965+ do_each_thread(tsk2, tsk) {
78966+ cred2 = __task_cred(tsk);
78967+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78968+ gr_fake_force_sig(SIGKILL, tsk);
78969+ } while_each_thread(tsk2, tsk);
78970+ }
78971+ }
78972+unlock:
78973+ read_unlock(&grsec_exec_file_lock);
78974+ read_unlock(&tasklist_lock);
78975+ rcu_read_unlock();
78976+
78977+ if (gr_is_global_nonroot(uid))
78978+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78979+ else if (daemon)
78980+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78981+
78982+#endif
78983+ return;
78984+}
78985+
78986+void gr_handle_brute_check(void)
78987+{
78988+#ifdef CONFIG_GRKERNSEC_BRUTE
78989+ struct task_struct *p = current;
78990+
78991+ if (unlikely(p->brute)) {
78992+ if (!grsec_enable_brute)
78993+ p->brute = 0;
78994+ else if (time_before(get_seconds(), p->brute_expires))
78995+ msleep(30 * 1000);
78996+ }
78997+#endif
78998+ return;
78999+}
79000+
79001+void gr_handle_kernel_exploit(void)
79002+{
79003+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79004+ const struct cred *cred;
79005+ struct task_struct *tsk, *tsk2;
79006+ struct user_struct *user;
79007+ kuid_t uid;
79008+
79009+ if (in_irq() || in_serving_softirq() || in_nmi())
79010+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
79011+
79012+ uid = current_uid();
79013+
79014+ if (gr_is_global_root(uid))
79015+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
79016+ else {
79017+ /* kill all the processes of this user, hold a reference
79018+ to their creds struct, and prevent them from creating
79019+ another process until system reset
79020+ */
79021+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
79022+ GR_GLOBAL_UID(uid));
79023+ /* we intentionally leak this ref */
79024+ user = get_uid(current->cred->user);
79025+ if (user)
79026+ user->kernel_banned = 1;
79027+
79028+ /* kill all processes of this user */
79029+ read_lock(&tasklist_lock);
79030+ do_each_thread(tsk2, tsk) {
79031+ cred = __task_cred(tsk);
79032+ if (uid_eq(cred->uid, uid))
79033+ gr_fake_force_sig(SIGKILL, tsk);
79034+ } while_each_thread(tsk2, tsk);
79035+ read_unlock(&tasklist_lock);
79036+ }
79037+#endif
79038+}
79039+
79040+#ifdef CONFIG_GRKERNSEC_BRUTE
79041+static bool suid_ban_expired(struct user_struct *user)
79042+{
79043+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
79044+ user->suid_banned = 0;
79045+ user->suid_ban_expires = 0;
79046+ free_uid(user);
79047+ return true;
79048+ }
79049+
79050+ return false;
79051+}
79052+#endif
79053+
79054+int gr_process_kernel_exec_ban(void)
79055+{
79056+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79057+ if (unlikely(current->cred->user->kernel_banned))
79058+ return -EPERM;
79059+#endif
79060+ return 0;
79061+}
79062+
79063+int gr_process_kernel_setuid_ban(struct user_struct *user)
79064+{
79065+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
79066+ if (unlikely(user->kernel_banned))
79067+ gr_fake_force_sig(SIGKILL, current);
79068+#endif
79069+ return 0;
79070+}
79071+
79072+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
79073+{
79074+#ifdef CONFIG_GRKERNSEC_BRUTE
79075+ struct user_struct *user = current->cred->user;
79076+ if (unlikely(user->suid_banned)) {
79077+ if (suid_ban_expired(user))
79078+ return 0;
79079+ /* disallow execution of suid binaries only */
79080+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
79081+ return -EPERM;
79082+ }
79083+#endif
79084+ return 0;
79085+}
79086diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
79087new file mode 100644
79088index 0000000..e3650b6
79089--- /dev/null
79090+++ b/grsecurity/grsec_sock.c
79091@@ -0,0 +1,244 @@
79092+#include <linux/kernel.h>
79093+#include <linux/module.h>
79094+#include <linux/sched.h>
79095+#include <linux/file.h>
79096+#include <linux/net.h>
79097+#include <linux/in.h>
79098+#include <linux/ip.h>
79099+#include <net/sock.h>
79100+#include <net/inet_sock.h>
79101+#include <linux/grsecurity.h>
79102+#include <linux/grinternal.h>
79103+#include <linux/gracl.h>
79104+
79105+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
79106+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
79107+
79108+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
79109+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
79110+
79111+#ifdef CONFIG_UNIX_MODULE
79112+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
79113+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
79114+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
79115+EXPORT_SYMBOL_GPL(gr_handle_create);
79116+#endif
79117+
79118+#ifdef CONFIG_GRKERNSEC
79119+#define gr_conn_table_size 32749
79120+struct conn_table_entry {
79121+ struct conn_table_entry *next;
79122+ struct signal_struct *sig;
79123+};
79124+
79125+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
79126+DEFINE_SPINLOCK(gr_conn_table_lock);
79127+
79128+extern const char * gr_socktype_to_name(unsigned char type);
79129+extern const char * gr_proto_to_name(unsigned char proto);
79130+extern const char * gr_sockfamily_to_name(unsigned char family);
79131+
79132+static __inline__ int
79133+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
79134+{
79135+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
79136+}
79137+
79138+static __inline__ int
79139+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
79140+ __u16 sport, __u16 dport)
79141+{
79142+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
79143+ sig->gr_sport == sport && sig->gr_dport == dport))
79144+ return 1;
79145+ else
79146+ return 0;
79147+}
79148+
79149+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
79150+{
79151+ struct conn_table_entry **match;
79152+ unsigned int index;
79153+
79154+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79155+ sig->gr_sport, sig->gr_dport,
79156+ gr_conn_table_size);
79157+
79158+ newent->sig = sig;
79159+
79160+ match = &gr_conn_table[index];
79161+ newent->next = *match;
79162+ *match = newent;
79163+
79164+ return;
79165+}
79166+
79167+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
79168+{
79169+ struct conn_table_entry *match, *last = NULL;
79170+ unsigned int index;
79171+
79172+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
79173+ sig->gr_sport, sig->gr_dport,
79174+ gr_conn_table_size);
79175+
79176+ match = gr_conn_table[index];
79177+ while (match && !conn_match(match->sig,
79178+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
79179+ sig->gr_dport)) {
79180+ last = match;
79181+ match = match->next;
79182+ }
79183+
79184+ if (match) {
79185+ if (last)
79186+ last->next = match->next;
79187+ else
79188+ gr_conn_table[index] = NULL;
79189+ kfree(match);
79190+ }
79191+
79192+ return;
79193+}
79194+
79195+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
79196+ __u16 sport, __u16 dport)
79197+{
79198+ struct conn_table_entry *match;
79199+ unsigned int index;
79200+
79201+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
79202+
79203+ match = gr_conn_table[index];
79204+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
79205+ match = match->next;
79206+
79207+ if (match)
79208+ return match->sig;
79209+ else
79210+ return NULL;
79211+}
79212+
79213+#endif
79214+
79215+void gr_update_task_in_ip_table(const struct inet_sock *inet)
79216+{
79217+#ifdef CONFIG_GRKERNSEC
79218+ struct signal_struct *sig = current->signal;
79219+ struct conn_table_entry *newent;
79220+
79221+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
79222+ if (newent == NULL)
79223+ return;
79224+ /* no bh lock needed since we are called with bh disabled */
79225+ spin_lock(&gr_conn_table_lock);
79226+ gr_del_task_from_ip_table_nolock(sig);
79227+ sig->gr_saddr = inet->inet_rcv_saddr;
79228+ sig->gr_daddr = inet->inet_daddr;
79229+ sig->gr_sport = inet->inet_sport;
79230+ sig->gr_dport = inet->inet_dport;
79231+ gr_add_to_task_ip_table_nolock(sig, newent);
79232+ spin_unlock(&gr_conn_table_lock);
79233+#endif
79234+ return;
79235+}
79236+
79237+void gr_del_task_from_ip_table(struct task_struct *task)
79238+{
79239+#ifdef CONFIG_GRKERNSEC
79240+ spin_lock_bh(&gr_conn_table_lock);
79241+ gr_del_task_from_ip_table_nolock(task->signal);
79242+ spin_unlock_bh(&gr_conn_table_lock);
79243+#endif
79244+ return;
79245+}
79246+
79247+void
79248+gr_attach_curr_ip(const struct sock *sk)
79249+{
79250+#ifdef CONFIG_GRKERNSEC
79251+ struct signal_struct *p, *set;
79252+ const struct inet_sock *inet = inet_sk(sk);
79253+
79254+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
79255+ return;
79256+
79257+ set = current->signal;
79258+
79259+ spin_lock_bh(&gr_conn_table_lock);
79260+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
79261+ inet->inet_dport, inet->inet_sport);
79262+ if (unlikely(p != NULL)) {
79263+ set->curr_ip = p->curr_ip;
79264+ set->used_accept = 1;
79265+ gr_del_task_from_ip_table_nolock(p);
79266+ spin_unlock_bh(&gr_conn_table_lock);
79267+ return;
79268+ }
79269+ spin_unlock_bh(&gr_conn_table_lock);
79270+
79271+ set->curr_ip = inet->inet_daddr;
79272+ set->used_accept = 1;
79273+#endif
79274+ return;
79275+}
79276+
79277+int
79278+gr_handle_sock_all(const int family, const int type, const int protocol)
79279+{
79280+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79281+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
79282+ (family != AF_UNIX)) {
79283+ if (family == AF_INET)
79284+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
79285+ else
79286+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
79287+ return -EACCES;
79288+ }
79289+#endif
79290+ return 0;
79291+}
79292+
79293+int
79294+gr_handle_sock_server(const struct sockaddr *sck)
79295+{
79296+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79297+ if (grsec_enable_socket_server &&
79298+ in_group_p(grsec_socket_server_gid) &&
79299+ sck && (sck->sa_family != AF_UNIX) &&
79300+ (sck->sa_family != AF_LOCAL)) {
79301+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79302+ return -EACCES;
79303+ }
79304+#endif
79305+ return 0;
79306+}
79307+
79308+int
79309+gr_handle_sock_server_other(const struct sock *sck)
79310+{
79311+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79312+ if (grsec_enable_socket_server &&
79313+ in_group_p(grsec_socket_server_gid) &&
79314+ sck && (sck->sk_family != AF_UNIX) &&
79315+ (sck->sk_family != AF_LOCAL)) {
79316+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
79317+ return -EACCES;
79318+ }
79319+#endif
79320+ return 0;
79321+}
79322+
79323+int
79324+gr_handle_sock_client(const struct sockaddr *sck)
79325+{
79326+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79327+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
79328+ sck && (sck->sa_family != AF_UNIX) &&
79329+ (sck->sa_family != AF_LOCAL)) {
79330+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
79331+ return -EACCES;
79332+ }
79333+#endif
79334+ return 0;
79335+}
79336diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
79337new file mode 100644
79338index 0000000..8159888
79339--- /dev/null
79340+++ b/grsecurity/grsec_sysctl.c
79341@@ -0,0 +1,479 @@
79342+#include <linux/kernel.h>
79343+#include <linux/sched.h>
79344+#include <linux/sysctl.h>
79345+#include <linux/grsecurity.h>
79346+#include <linux/grinternal.h>
79347+
79348+int
79349+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
79350+{
79351+#ifdef CONFIG_GRKERNSEC_SYSCTL
79352+ if (dirname == NULL || name == NULL)
79353+ return 0;
79354+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
79355+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
79356+ return -EACCES;
79357+ }
79358+#endif
79359+ return 0;
79360+}
79361+
79362+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
79363+static int __maybe_unused __read_only one = 1;
79364+#endif
79365+
79366+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
79367+ defined(CONFIG_GRKERNSEC_DENYUSB)
79368+struct ctl_table grsecurity_table[] = {
79369+#ifdef CONFIG_GRKERNSEC_SYSCTL
79370+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
79371+#ifdef CONFIG_GRKERNSEC_IO
79372+ {
79373+ .procname = "disable_priv_io",
79374+ .data = &grsec_disable_privio,
79375+ .maxlen = sizeof(int),
79376+ .mode = 0600,
79377+ .proc_handler = &proc_dointvec,
79378+ },
79379+#endif
79380+#endif
79381+#ifdef CONFIG_GRKERNSEC_LINK
79382+ {
79383+ .procname = "linking_restrictions",
79384+ .data = &grsec_enable_link,
79385+ .maxlen = sizeof(int),
79386+ .mode = 0600,
79387+ .proc_handler = &proc_dointvec,
79388+ },
79389+#endif
79390+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
79391+ {
79392+ .procname = "enforce_symlinksifowner",
79393+ .data = &grsec_enable_symlinkown,
79394+ .maxlen = sizeof(int),
79395+ .mode = 0600,
79396+ .proc_handler = &proc_dointvec,
79397+ },
79398+ {
79399+ .procname = "symlinkown_gid",
79400+ .data = &grsec_symlinkown_gid,
79401+ .maxlen = sizeof(int),
79402+ .mode = 0600,
79403+ .proc_handler = &proc_dointvec,
79404+ },
79405+#endif
79406+#ifdef CONFIG_GRKERNSEC_BRUTE
79407+ {
79408+ .procname = "deter_bruteforce",
79409+ .data = &grsec_enable_brute,
79410+ .maxlen = sizeof(int),
79411+ .mode = 0600,
79412+ .proc_handler = &proc_dointvec,
79413+ },
79414+#endif
79415+#ifdef CONFIG_GRKERNSEC_FIFO
79416+ {
79417+ .procname = "fifo_restrictions",
79418+ .data = &grsec_enable_fifo,
79419+ .maxlen = sizeof(int),
79420+ .mode = 0600,
79421+ .proc_handler = &proc_dointvec,
79422+ },
79423+#endif
79424+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
79425+ {
79426+ .procname = "ptrace_readexec",
79427+ .data = &grsec_enable_ptrace_readexec,
79428+ .maxlen = sizeof(int),
79429+ .mode = 0600,
79430+ .proc_handler = &proc_dointvec,
79431+ },
79432+#endif
79433+#ifdef CONFIG_GRKERNSEC_SETXID
79434+ {
79435+ .procname = "consistent_setxid",
79436+ .data = &grsec_enable_setxid,
79437+ .maxlen = sizeof(int),
79438+ .mode = 0600,
79439+ .proc_handler = &proc_dointvec,
79440+ },
79441+#endif
79442+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
79443+ {
79444+ .procname = "ip_blackhole",
79445+ .data = &grsec_enable_blackhole,
79446+ .maxlen = sizeof(int),
79447+ .mode = 0600,
79448+ .proc_handler = &proc_dointvec,
79449+ },
79450+ {
79451+ .procname = "lastack_retries",
79452+ .data = &grsec_lastack_retries,
79453+ .maxlen = sizeof(int),
79454+ .mode = 0600,
79455+ .proc_handler = &proc_dointvec,
79456+ },
79457+#endif
79458+#ifdef CONFIG_GRKERNSEC_EXECLOG
79459+ {
79460+ .procname = "exec_logging",
79461+ .data = &grsec_enable_execlog,
79462+ .maxlen = sizeof(int),
79463+ .mode = 0600,
79464+ .proc_handler = &proc_dointvec,
79465+ },
79466+#endif
79467+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
79468+ {
79469+ .procname = "rwxmap_logging",
79470+ .data = &grsec_enable_log_rwxmaps,
79471+ .maxlen = sizeof(int),
79472+ .mode = 0600,
79473+ .proc_handler = &proc_dointvec,
79474+ },
79475+#endif
79476+#ifdef CONFIG_GRKERNSEC_SIGNAL
79477+ {
79478+ .procname = "signal_logging",
79479+ .data = &grsec_enable_signal,
79480+ .maxlen = sizeof(int),
79481+ .mode = 0600,
79482+ .proc_handler = &proc_dointvec,
79483+ },
79484+#endif
79485+#ifdef CONFIG_GRKERNSEC_FORKFAIL
79486+ {
79487+ .procname = "forkfail_logging",
79488+ .data = &grsec_enable_forkfail,
79489+ .maxlen = sizeof(int),
79490+ .mode = 0600,
79491+ .proc_handler = &proc_dointvec,
79492+ },
79493+#endif
79494+#ifdef CONFIG_GRKERNSEC_TIME
79495+ {
79496+ .procname = "timechange_logging",
79497+ .data = &grsec_enable_time,
79498+ .maxlen = sizeof(int),
79499+ .mode = 0600,
79500+ .proc_handler = &proc_dointvec,
79501+ },
79502+#endif
79503+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
79504+ {
79505+ .procname = "chroot_deny_shmat",
79506+ .data = &grsec_enable_chroot_shmat,
79507+ .maxlen = sizeof(int),
79508+ .mode = 0600,
79509+ .proc_handler = &proc_dointvec,
79510+ },
79511+#endif
79512+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
79513+ {
79514+ .procname = "chroot_deny_unix",
79515+ .data = &grsec_enable_chroot_unix,
79516+ .maxlen = sizeof(int),
79517+ .mode = 0600,
79518+ .proc_handler = &proc_dointvec,
79519+ },
79520+#endif
79521+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
79522+ {
79523+ .procname = "chroot_deny_mount",
79524+ .data = &grsec_enable_chroot_mount,
79525+ .maxlen = sizeof(int),
79526+ .mode = 0600,
79527+ .proc_handler = &proc_dointvec,
79528+ },
79529+#endif
79530+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
79531+ {
79532+ .procname = "chroot_deny_fchdir",
79533+ .data = &grsec_enable_chroot_fchdir,
79534+ .maxlen = sizeof(int),
79535+ .mode = 0600,
79536+ .proc_handler = &proc_dointvec,
79537+ },
79538+#endif
79539+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
79540+ {
79541+ .procname = "chroot_deny_chroot",
79542+ .data = &grsec_enable_chroot_double,
79543+ .maxlen = sizeof(int),
79544+ .mode = 0600,
79545+ .proc_handler = &proc_dointvec,
79546+ },
79547+#endif
79548+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
79549+ {
79550+ .procname = "chroot_deny_pivot",
79551+ .data = &grsec_enable_chroot_pivot,
79552+ .maxlen = sizeof(int),
79553+ .mode = 0600,
79554+ .proc_handler = &proc_dointvec,
79555+ },
79556+#endif
79557+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
79558+ {
79559+ .procname = "chroot_enforce_chdir",
79560+ .data = &grsec_enable_chroot_chdir,
79561+ .maxlen = sizeof(int),
79562+ .mode = 0600,
79563+ .proc_handler = &proc_dointvec,
79564+ },
79565+#endif
79566+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
79567+ {
79568+ .procname = "chroot_deny_chmod",
79569+ .data = &grsec_enable_chroot_chmod,
79570+ .maxlen = sizeof(int),
79571+ .mode = 0600,
79572+ .proc_handler = &proc_dointvec,
79573+ },
79574+#endif
79575+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
79576+ {
79577+ .procname = "chroot_deny_mknod",
79578+ .data = &grsec_enable_chroot_mknod,
79579+ .maxlen = sizeof(int),
79580+ .mode = 0600,
79581+ .proc_handler = &proc_dointvec,
79582+ },
79583+#endif
79584+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
79585+ {
79586+ .procname = "chroot_restrict_nice",
79587+ .data = &grsec_enable_chroot_nice,
79588+ .maxlen = sizeof(int),
79589+ .mode = 0600,
79590+ .proc_handler = &proc_dointvec,
79591+ },
79592+#endif
79593+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
79594+ {
79595+ .procname = "chroot_execlog",
79596+ .data = &grsec_enable_chroot_execlog,
79597+ .maxlen = sizeof(int),
79598+ .mode = 0600,
79599+ .proc_handler = &proc_dointvec,
79600+ },
79601+#endif
79602+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
79603+ {
79604+ .procname = "chroot_caps",
79605+ .data = &grsec_enable_chroot_caps,
79606+ .maxlen = sizeof(int),
79607+ .mode = 0600,
79608+ .proc_handler = &proc_dointvec,
79609+ },
79610+#endif
79611+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79612+ {
79613+ .procname = "chroot_deny_sysctl",
79614+ .data = &grsec_enable_chroot_sysctl,
79615+ .maxlen = sizeof(int),
79616+ .mode = 0600,
79617+ .proc_handler = &proc_dointvec,
79618+ },
79619+#endif
79620+#ifdef CONFIG_GRKERNSEC_TPE
79621+ {
79622+ .procname = "tpe",
79623+ .data = &grsec_enable_tpe,
79624+ .maxlen = sizeof(int),
79625+ .mode = 0600,
79626+ .proc_handler = &proc_dointvec,
79627+ },
79628+ {
79629+ .procname = "tpe_gid",
79630+ .data = &grsec_tpe_gid,
79631+ .maxlen = sizeof(int),
79632+ .mode = 0600,
79633+ .proc_handler = &proc_dointvec,
79634+ },
79635+#endif
79636+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79637+ {
79638+ .procname = "tpe_invert",
79639+ .data = &grsec_enable_tpe_invert,
79640+ .maxlen = sizeof(int),
79641+ .mode = 0600,
79642+ .proc_handler = &proc_dointvec,
79643+ },
79644+#endif
79645+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79646+ {
79647+ .procname = "tpe_restrict_all",
79648+ .data = &grsec_enable_tpe_all,
79649+ .maxlen = sizeof(int),
79650+ .mode = 0600,
79651+ .proc_handler = &proc_dointvec,
79652+ },
79653+#endif
79654+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79655+ {
79656+ .procname = "socket_all",
79657+ .data = &grsec_enable_socket_all,
79658+ .maxlen = sizeof(int),
79659+ .mode = 0600,
79660+ .proc_handler = &proc_dointvec,
79661+ },
79662+ {
79663+ .procname = "socket_all_gid",
79664+ .data = &grsec_socket_all_gid,
79665+ .maxlen = sizeof(int),
79666+ .mode = 0600,
79667+ .proc_handler = &proc_dointvec,
79668+ },
79669+#endif
79670+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79671+ {
79672+ .procname = "socket_client",
79673+ .data = &grsec_enable_socket_client,
79674+ .maxlen = sizeof(int),
79675+ .mode = 0600,
79676+ .proc_handler = &proc_dointvec,
79677+ },
79678+ {
79679+ .procname = "socket_client_gid",
79680+ .data = &grsec_socket_client_gid,
79681+ .maxlen = sizeof(int),
79682+ .mode = 0600,
79683+ .proc_handler = &proc_dointvec,
79684+ },
79685+#endif
79686+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79687+ {
79688+ .procname = "socket_server",
79689+ .data = &grsec_enable_socket_server,
79690+ .maxlen = sizeof(int),
79691+ .mode = 0600,
79692+ .proc_handler = &proc_dointvec,
79693+ },
79694+ {
79695+ .procname = "socket_server_gid",
79696+ .data = &grsec_socket_server_gid,
79697+ .maxlen = sizeof(int),
79698+ .mode = 0600,
79699+ .proc_handler = &proc_dointvec,
79700+ },
79701+#endif
79702+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79703+ {
79704+ .procname = "audit_group",
79705+ .data = &grsec_enable_group,
79706+ .maxlen = sizeof(int),
79707+ .mode = 0600,
79708+ .proc_handler = &proc_dointvec,
79709+ },
79710+ {
79711+ .procname = "audit_gid",
79712+ .data = &grsec_audit_gid,
79713+ .maxlen = sizeof(int),
79714+ .mode = 0600,
79715+ .proc_handler = &proc_dointvec,
79716+ },
79717+#endif
79718+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79719+ {
79720+ .procname = "audit_chdir",
79721+ .data = &grsec_enable_chdir,
79722+ .maxlen = sizeof(int),
79723+ .mode = 0600,
79724+ .proc_handler = &proc_dointvec,
79725+ },
79726+#endif
79727+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79728+ {
79729+ .procname = "audit_mount",
79730+ .data = &grsec_enable_mount,
79731+ .maxlen = sizeof(int),
79732+ .mode = 0600,
79733+ .proc_handler = &proc_dointvec,
79734+ },
79735+#endif
79736+#ifdef CONFIG_GRKERNSEC_DMESG
79737+ {
79738+ .procname = "dmesg",
79739+ .data = &grsec_enable_dmesg,
79740+ .maxlen = sizeof(int),
79741+ .mode = 0600,
79742+ .proc_handler = &proc_dointvec,
79743+ },
79744+#endif
79745+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79746+ {
79747+ .procname = "chroot_findtask",
79748+ .data = &grsec_enable_chroot_findtask,
79749+ .maxlen = sizeof(int),
79750+ .mode = 0600,
79751+ .proc_handler = &proc_dointvec,
79752+ },
79753+#endif
79754+#ifdef CONFIG_GRKERNSEC_RESLOG
79755+ {
79756+ .procname = "resource_logging",
79757+ .data = &grsec_resource_logging,
79758+ .maxlen = sizeof(int),
79759+ .mode = 0600,
79760+ .proc_handler = &proc_dointvec,
79761+ },
79762+#endif
79763+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79764+ {
79765+ .procname = "audit_ptrace",
79766+ .data = &grsec_enable_audit_ptrace,
79767+ .maxlen = sizeof(int),
79768+ .mode = 0600,
79769+ .proc_handler = &proc_dointvec,
79770+ },
79771+#endif
79772+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79773+ {
79774+ .procname = "harden_ptrace",
79775+ .data = &grsec_enable_harden_ptrace,
79776+ .maxlen = sizeof(int),
79777+ .mode = 0600,
79778+ .proc_handler = &proc_dointvec,
79779+ },
79780+#endif
79781+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79782+ {
79783+ .procname = "harden_ipc",
79784+ .data = &grsec_enable_harden_ipc,
79785+ .maxlen = sizeof(int),
79786+ .mode = 0600,
79787+ .proc_handler = &proc_dointvec,
79788+ },
79789+#endif
79790+ {
79791+ .procname = "grsec_lock",
79792+ .data = &grsec_lock,
79793+ .maxlen = sizeof(int),
79794+ .mode = 0600,
79795+ .proc_handler = &proc_dointvec,
79796+ },
79797+#endif
79798+#ifdef CONFIG_GRKERNSEC_ROFS
79799+ {
79800+ .procname = "romount_protect",
79801+ .data = &grsec_enable_rofs,
79802+ .maxlen = sizeof(int),
79803+ .mode = 0600,
79804+ .proc_handler = &proc_dointvec_minmax,
79805+ .extra1 = &one,
79806+ .extra2 = &one,
79807+ },
79808+#endif
79809+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79810+ {
79811+ .procname = "deny_new_usb",
79812+ .data = &grsec_deny_new_usb,
79813+ .maxlen = sizeof(int),
79814+ .mode = 0600,
79815+ .proc_handler = &proc_dointvec,
79816+ },
79817+#endif
79818+ { }
79819+};
79820+#endif
79821diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79822new file mode 100644
79823index 0000000..61b514e
79824--- /dev/null
79825+++ b/grsecurity/grsec_time.c
79826@@ -0,0 +1,16 @@
79827+#include <linux/kernel.h>
79828+#include <linux/sched.h>
79829+#include <linux/grinternal.h>
79830+#include <linux/module.h>
79831+
79832+void
79833+gr_log_timechange(void)
79834+{
79835+#ifdef CONFIG_GRKERNSEC_TIME
79836+ if (grsec_enable_time)
79837+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79838+#endif
79839+ return;
79840+}
79841+
79842+EXPORT_SYMBOL_GPL(gr_log_timechange);
79843diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79844new file mode 100644
79845index 0000000..d1953de
79846--- /dev/null
79847+++ b/grsecurity/grsec_tpe.c
79848@@ -0,0 +1,78 @@
79849+#include <linux/kernel.h>
79850+#include <linux/sched.h>
79851+#include <linux/file.h>
79852+#include <linux/fs.h>
79853+#include <linux/grinternal.h>
79854+
79855+extern int gr_acl_tpe_check(void);
79856+
79857+int
79858+gr_tpe_allow(const struct file *file)
79859+{
79860+#ifdef CONFIG_GRKERNSEC
79861+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79862+ struct inode *file_inode = file->f_path.dentry->d_inode;
79863+ const struct cred *cred = current_cred();
79864+ char *msg = NULL;
79865+ char *msg2 = NULL;
79866+
79867+ // never restrict root
79868+ if (gr_is_global_root(cred->uid))
79869+ return 1;
79870+
79871+ if (grsec_enable_tpe) {
79872+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79873+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79874+ msg = "not being in trusted group";
79875+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79876+ msg = "being in untrusted group";
79877+#else
79878+ if (in_group_p(grsec_tpe_gid))
79879+ msg = "being in untrusted group";
79880+#endif
79881+ }
79882+ if (!msg && gr_acl_tpe_check())
79883+ msg = "being in untrusted role";
79884+
79885+ // not in any affected group/role
79886+ if (!msg)
79887+ goto next_check;
79888+
79889+ if (gr_is_global_nonroot(inode->i_uid))
79890+ msg2 = "file in non-root-owned directory";
79891+ else if (inode->i_mode & S_IWOTH)
79892+ msg2 = "file in world-writable directory";
79893+ else if (inode->i_mode & S_IWGRP)
79894+ msg2 = "file in group-writable directory";
79895+ else if (file_inode->i_mode & S_IWOTH)
79896+ msg2 = "file is world-writable";
79897+
79898+ if (msg && msg2) {
79899+ char fullmsg[70] = {0};
79900+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79901+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79902+ return 0;
79903+ }
79904+ msg = NULL;
79905+next_check:
79906+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79907+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79908+ return 1;
79909+
79910+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79911+ msg = "directory not owned by user";
79912+ else if (inode->i_mode & S_IWOTH)
79913+ msg = "file in world-writable directory";
79914+ else if (inode->i_mode & S_IWGRP)
79915+ msg = "file in group-writable directory";
79916+ else if (file_inode->i_mode & S_IWOTH)
79917+ msg = "file is world-writable";
79918+
79919+ if (msg) {
79920+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79921+ return 0;
79922+ }
79923+#endif
79924+#endif
79925+ return 1;
79926+}
79927diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79928new file mode 100644
79929index 0000000..ae02d8e
79930--- /dev/null
79931+++ b/grsecurity/grsec_usb.c
79932@@ -0,0 +1,15 @@
79933+#include <linux/kernel.h>
79934+#include <linux/grinternal.h>
79935+#include <linux/module.h>
79936+
79937+int gr_handle_new_usb(void)
79938+{
79939+#ifdef CONFIG_GRKERNSEC_DENYUSB
79940+ if (grsec_deny_new_usb) {
79941+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79942+ return 1;
79943+ }
79944+#endif
79945+ return 0;
79946+}
79947+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79948diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79949new file mode 100644
79950index 0000000..158b330
79951--- /dev/null
79952+++ b/grsecurity/grsum.c
79953@@ -0,0 +1,64 @@
79954+#include <linux/err.h>
79955+#include <linux/kernel.h>
79956+#include <linux/sched.h>
79957+#include <linux/mm.h>
79958+#include <linux/scatterlist.h>
79959+#include <linux/crypto.h>
79960+#include <linux/gracl.h>
79961+
79962+
79963+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79964+#error "crypto and sha256 must be built into the kernel"
79965+#endif
79966+
79967+int
79968+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79969+{
79970+ struct crypto_hash *tfm;
79971+ struct hash_desc desc;
79972+ struct scatterlist sg[2];
79973+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79974+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79975+ unsigned long *sumptr = (unsigned long *)sum;
79976+ int cryptres;
79977+ int retval = 1;
79978+ volatile int mismatched = 0;
79979+ volatile int dummy = 0;
79980+ unsigned int i;
79981+
79982+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79983+ if (IS_ERR(tfm)) {
79984+ /* should never happen, since sha256 should be built in */
79985+ memset(entry->pw, 0, GR_PW_LEN);
79986+ return 1;
79987+ }
79988+
79989+ sg_init_table(sg, 2);
79990+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79991+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79992+
79993+ desc.tfm = tfm;
79994+ desc.flags = 0;
79995+
79996+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79997+ temp_sum);
79998+
79999+ memset(entry->pw, 0, GR_PW_LEN);
80000+
80001+ if (cryptres)
80002+ goto out;
80003+
80004+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
80005+ if (sumptr[i] != tmpsumptr[i])
80006+ mismatched = 1;
80007+ else
80008+ dummy = 1; // waste a cycle
80009+
80010+ if (!mismatched)
80011+ retval = dummy - 1;
80012+
80013+out:
80014+ crypto_free_hash(tfm);
80015+
80016+ return retval;
80017+}
80018diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
80019index 77ff547..181834f 100644
80020--- a/include/asm-generic/4level-fixup.h
80021+++ b/include/asm-generic/4level-fixup.h
80022@@ -13,8 +13,10 @@
80023 #define pmd_alloc(mm, pud, address) \
80024 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
80025 NULL: pmd_offset(pud, address))
80026+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
80027
80028 #define pud_alloc(mm, pgd, address) (pgd)
80029+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
80030 #define pud_offset(pgd, start) (pgd)
80031 #define pud_none(pud) 0
80032 #define pud_bad(pud) 0
80033diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
80034index b7babf0..97f4c4f 100644
80035--- a/include/asm-generic/atomic-long.h
80036+++ b/include/asm-generic/atomic-long.h
80037@@ -22,6 +22,12 @@
80038
80039 typedef atomic64_t atomic_long_t;
80040
80041+#ifdef CONFIG_PAX_REFCOUNT
80042+typedef atomic64_unchecked_t atomic_long_unchecked_t;
80043+#else
80044+typedef atomic64_t atomic_long_unchecked_t;
80045+#endif
80046+
80047 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
80048
80049 static inline long atomic_long_read(atomic_long_t *l)
80050@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80051 return (long)atomic64_read(v);
80052 }
80053
80054+#ifdef CONFIG_PAX_REFCOUNT
80055+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80056+{
80057+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80058+
80059+ return (long)atomic64_read_unchecked(v);
80060+}
80061+#endif
80062+
80063 static inline void atomic_long_set(atomic_long_t *l, long i)
80064 {
80065 atomic64_t *v = (atomic64_t *)l;
80066@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80067 atomic64_set(v, i);
80068 }
80069
80070+#ifdef CONFIG_PAX_REFCOUNT
80071+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80072+{
80073+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80074+
80075+ atomic64_set_unchecked(v, i);
80076+}
80077+#endif
80078+
80079 static inline void atomic_long_inc(atomic_long_t *l)
80080 {
80081 atomic64_t *v = (atomic64_t *)l;
80082@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80083 atomic64_inc(v);
80084 }
80085
80086+#ifdef CONFIG_PAX_REFCOUNT
80087+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80088+{
80089+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80090+
80091+ atomic64_inc_unchecked(v);
80092+}
80093+#endif
80094+
80095 static inline void atomic_long_dec(atomic_long_t *l)
80096 {
80097 atomic64_t *v = (atomic64_t *)l;
80098@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80099 atomic64_dec(v);
80100 }
80101
80102+#ifdef CONFIG_PAX_REFCOUNT
80103+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80104+{
80105+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80106+
80107+ atomic64_dec_unchecked(v);
80108+}
80109+#endif
80110+
80111 static inline void atomic_long_add(long i, atomic_long_t *l)
80112 {
80113 atomic64_t *v = (atomic64_t *)l;
80114@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80115 atomic64_add(i, v);
80116 }
80117
80118+#ifdef CONFIG_PAX_REFCOUNT
80119+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80120+{
80121+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80122+
80123+ atomic64_add_unchecked(i, v);
80124+}
80125+#endif
80126+
80127 static inline void atomic_long_sub(long i, atomic_long_t *l)
80128 {
80129 atomic64_t *v = (atomic64_t *)l;
80130@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80131 atomic64_sub(i, v);
80132 }
80133
80134+#ifdef CONFIG_PAX_REFCOUNT
80135+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80136+{
80137+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80138+
80139+ atomic64_sub_unchecked(i, v);
80140+}
80141+#endif
80142+
80143 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80144 {
80145 atomic64_t *v = (atomic64_t *)l;
80146@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
80147 return atomic64_add_negative(i, v);
80148 }
80149
80150-static inline long atomic_long_add_return(long i, atomic_long_t *l)
80151+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
80152 {
80153 atomic64_t *v = (atomic64_t *)l;
80154
80155 return (long)atomic64_add_return(i, v);
80156 }
80157
80158+#ifdef CONFIG_PAX_REFCOUNT
80159+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80160+{
80161+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80162+
80163+ return (long)atomic64_add_return_unchecked(i, v);
80164+}
80165+#endif
80166+
80167 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80168 {
80169 atomic64_t *v = (atomic64_t *)l;
80170@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80171 return (long)atomic64_inc_return(v);
80172 }
80173
80174+#ifdef CONFIG_PAX_REFCOUNT
80175+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80176+{
80177+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
80178+
80179+ return (long)atomic64_inc_return_unchecked(v);
80180+}
80181+#endif
80182+
80183 static inline long atomic_long_dec_return(atomic_long_t *l)
80184 {
80185 atomic64_t *v = (atomic64_t *)l;
80186@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80187
80188 typedef atomic_t atomic_long_t;
80189
80190+#ifdef CONFIG_PAX_REFCOUNT
80191+typedef atomic_unchecked_t atomic_long_unchecked_t;
80192+#else
80193+typedef atomic_t atomic_long_unchecked_t;
80194+#endif
80195+
80196 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
80197 static inline long atomic_long_read(atomic_long_t *l)
80198 {
80199@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
80200 return (long)atomic_read(v);
80201 }
80202
80203+#ifdef CONFIG_PAX_REFCOUNT
80204+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
80205+{
80206+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80207+
80208+ return (long)atomic_read_unchecked(v);
80209+}
80210+#endif
80211+
80212 static inline void atomic_long_set(atomic_long_t *l, long i)
80213 {
80214 atomic_t *v = (atomic_t *)l;
80215@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
80216 atomic_set(v, i);
80217 }
80218
80219+#ifdef CONFIG_PAX_REFCOUNT
80220+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
80221+{
80222+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80223+
80224+ atomic_set_unchecked(v, i);
80225+}
80226+#endif
80227+
80228 static inline void atomic_long_inc(atomic_long_t *l)
80229 {
80230 atomic_t *v = (atomic_t *)l;
80231@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
80232 atomic_inc(v);
80233 }
80234
80235+#ifdef CONFIG_PAX_REFCOUNT
80236+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
80237+{
80238+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80239+
80240+ atomic_inc_unchecked(v);
80241+}
80242+#endif
80243+
80244 static inline void atomic_long_dec(atomic_long_t *l)
80245 {
80246 atomic_t *v = (atomic_t *)l;
80247@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
80248 atomic_dec(v);
80249 }
80250
80251+#ifdef CONFIG_PAX_REFCOUNT
80252+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
80253+{
80254+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80255+
80256+ atomic_dec_unchecked(v);
80257+}
80258+#endif
80259+
80260 static inline void atomic_long_add(long i, atomic_long_t *l)
80261 {
80262 atomic_t *v = (atomic_t *)l;
80263@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
80264 atomic_add(i, v);
80265 }
80266
80267+#ifdef CONFIG_PAX_REFCOUNT
80268+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
80269+{
80270+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80271+
80272+ atomic_add_unchecked(i, v);
80273+}
80274+#endif
80275+
80276 static inline void atomic_long_sub(long i, atomic_long_t *l)
80277 {
80278 atomic_t *v = (atomic_t *)l;
80279@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
80280 atomic_sub(i, v);
80281 }
80282
80283+#ifdef CONFIG_PAX_REFCOUNT
80284+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
80285+{
80286+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80287+
80288+ atomic_sub_unchecked(i, v);
80289+}
80290+#endif
80291+
80292 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
80293 {
80294 atomic_t *v = (atomic_t *)l;
80295@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
80296 return (long)atomic_add_return(i, v);
80297 }
80298
80299+#ifdef CONFIG_PAX_REFCOUNT
80300+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
80301+{
80302+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80303+
80304+ return (long)atomic_add_return_unchecked(i, v);
80305+}
80306+
80307+#endif
80308+
80309 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
80310 {
80311 atomic_t *v = (atomic_t *)l;
80312@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
80313 return (long)atomic_inc_return(v);
80314 }
80315
80316+#ifdef CONFIG_PAX_REFCOUNT
80317+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
80318+{
80319+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
80320+
80321+ return (long)atomic_inc_return_unchecked(v);
80322+}
80323+#endif
80324+
80325 static inline long atomic_long_dec_return(atomic_long_t *l)
80326 {
80327 atomic_t *v = (atomic_t *)l;
80328@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
80329
80330 #endif /* BITS_PER_LONG == 64 */
80331
80332+#ifdef CONFIG_PAX_REFCOUNT
80333+static inline void pax_refcount_needs_these_functions(void)
80334+{
80335+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
80336+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
80337+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
80338+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
80339+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
80340+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
80341+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
80342+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
80343+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
80344+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
80345+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
80346+#ifdef CONFIG_X86
80347+ atomic_clear_mask_unchecked(0, NULL);
80348+ atomic_set_mask_unchecked(0, NULL);
80349+#endif
80350+
80351+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
80352+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
80353+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
80354+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
80355+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
80356+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
80357+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
80358+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
80359+}
80360+#else
80361+#define atomic_read_unchecked(v) atomic_read(v)
80362+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
80363+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
80364+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
80365+#define atomic_inc_unchecked(v) atomic_inc(v)
80366+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
80367+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
80368+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
80369+#define atomic_dec_unchecked(v) atomic_dec(v)
80370+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
80371+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
80372+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
80373+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
80374+
80375+#define atomic_long_read_unchecked(v) atomic_long_read(v)
80376+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
80377+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
80378+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
80379+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
80380+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
80381+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
80382+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
80383+#endif
80384+
80385 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
80386diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
80387index 9c79e76..9f7827d 100644
80388--- a/include/asm-generic/atomic.h
80389+++ b/include/asm-generic/atomic.h
80390@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
80391 * Atomically clears the bits set in @mask from @v
80392 */
80393 #ifndef atomic_clear_mask
80394-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
80395+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80396 {
80397 unsigned long flags;
80398
80399diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
80400index b18ce4f..2ee2843 100644
80401--- a/include/asm-generic/atomic64.h
80402+++ b/include/asm-generic/atomic64.h
80403@@ -16,6 +16,8 @@ typedef struct {
80404 long long counter;
80405 } atomic64_t;
80406
80407+typedef atomic64_t atomic64_unchecked_t;
80408+
80409 #define ATOMIC64_INIT(i) { (i) }
80410
80411 extern long long atomic64_read(const atomic64_t *v);
80412@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
80413 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
80414 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
80415
80416+#define atomic64_read_unchecked(v) atomic64_read(v)
80417+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
80418+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
80419+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
80420+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
80421+#define atomic64_inc_unchecked(v) atomic64_inc(v)
80422+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
80423+#define atomic64_dec_unchecked(v) atomic64_dec(v)
80424+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
80425+
80426 #endif /* _ASM_GENERIC_ATOMIC64_H */
80427diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
80428index 1402fa8..025a736 100644
80429--- a/include/asm-generic/barrier.h
80430+++ b/include/asm-generic/barrier.h
80431@@ -74,7 +74,7 @@
80432 do { \
80433 compiletime_assert_atomic_type(*p); \
80434 smp_mb(); \
80435- ACCESS_ONCE(*p) = (v); \
80436+ ACCESS_ONCE_RW(*p) = (v); \
80437 } while (0)
80438
80439 #define smp_load_acquire(p) \
80440diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
80441index a60a7cc..0fe12f2 100644
80442--- a/include/asm-generic/bitops/__fls.h
80443+++ b/include/asm-generic/bitops/__fls.h
80444@@ -9,7 +9,7 @@
80445 *
80446 * Undefined if no set bit exists, so code should check against 0 first.
80447 */
80448-static __always_inline unsigned long __fls(unsigned long word)
80449+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
80450 {
80451 int num = BITS_PER_LONG - 1;
80452
80453diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
80454index 0576d1f..dad6c71 100644
80455--- a/include/asm-generic/bitops/fls.h
80456+++ b/include/asm-generic/bitops/fls.h
80457@@ -9,7 +9,7 @@
80458 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
80459 */
80460
80461-static __always_inline int fls(int x)
80462+static __always_inline int __intentional_overflow(-1) fls(int x)
80463 {
80464 int r = 32;
80465
80466diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
80467index b097cf8..3d40e14 100644
80468--- a/include/asm-generic/bitops/fls64.h
80469+++ b/include/asm-generic/bitops/fls64.h
80470@@ -15,7 +15,7 @@
80471 * at position 64.
80472 */
80473 #if BITS_PER_LONG == 32
80474-static __always_inline int fls64(__u64 x)
80475+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80476 {
80477 __u32 h = x >> 32;
80478 if (h)
80479@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
80480 return fls(x);
80481 }
80482 #elif BITS_PER_LONG == 64
80483-static __always_inline int fls64(__u64 x)
80484+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
80485 {
80486 if (x == 0)
80487 return 0;
80488diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
80489index 1bfcfe5..e04c5c9 100644
80490--- a/include/asm-generic/cache.h
80491+++ b/include/asm-generic/cache.h
80492@@ -6,7 +6,7 @@
80493 * cache lines need to provide their own cache.h.
80494 */
80495
80496-#define L1_CACHE_SHIFT 5
80497-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
80498+#define L1_CACHE_SHIFT 5UL
80499+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
80500
80501 #endif /* __ASM_GENERIC_CACHE_H */
80502diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
80503index 0d68a1e..b74a761 100644
80504--- a/include/asm-generic/emergency-restart.h
80505+++ b/include/asm-generic/emergency-restart.h
80506@@ -1,7 +1,7 @@
80507 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
80508 #define _ASM_GENERIC_EMERGENCY_RESTART_H
80509
80510-static inline void machine_emergency_restart(void)
80511+static inline __noreturn void machine_emergency_restart(void)
80512 {
80513 machine_restart(NULL);
80514 }
80515diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
80516index 975e1cc..0b8a083 100644
80517--- a/include/asm-generic/io.h
80518+++ b/include/asm-generic/io.h
80519@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
80520 * These are pretty trivial
80521 */
80522 #ifndef virt_to_phys
80523-static inline unsigned long virt_to_phys(volatile void *address)
80524+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
80525 {
80526 return __pa((unsigned long)address);
80527 }
80528diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
80529index 90f99c7..00ce236 100644
80530--- a/include/asm-generic/kmap_types.h
80531+++ b/include/asm-generic/kmap_types.h
80532@@ -2,9 +2,9 @@
80533 #define _ASM_GENERIC_KMAP_TYPES_H
80534
80535 #ifdef __WITH_KM_FENCE
80536-# define KM_TYPE_NR 41
80537+# define KM_TYPE_NR 42
80538 #else
80539-# define KM_TYPE_NR 20
80540+# define KM_TYPE_NR 21
80541 #endif
80542
80543 #endif
80544diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
80545index 9ceb03b..62b0b8f 100644
80546--- a/include/asm-generic/local.h
80547+++ b/include/asm-generic/local.h
80548@@ -23,24 +23,37 @@ typedef struct
80549 atomic_long_t a;
80550 } local_t;
80551
80552+typedef struct {
80553+ atomic_long_unchecked_t a;
80554+} local_unchecked_t;
80555+
80556 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
80557
80558 #define local_read(l) atomic_long_read(&(l)->a)
80559+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
80560 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
80561+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
80562 #define local_inc(l) atomic_long_inc(&(l)->a)
80563+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
80564 #define local_dec(l) atomic_long_dec(&(l)->a)
80565+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
80566 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
80567+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
80568 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
80569+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
80570
80571 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
80572 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
80573 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
80574 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
80575 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
80576+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
80577 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
80578 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
80579+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
80580
80581 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80582+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
80583 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
80584 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
80585 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
80586diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
80587index 725612b..9cc513a 100644
80588--- a/include/asm-generic/pgtable-nopmd.h
80589+++ b/include/asm-generic/pgtable-nopmd.h
80590@@ -1,14 +1,19 @@
80591 #ifndef _PGTABLE_NOPMD_H
80592 #define _PGTABLE_NOPMD_H
80593
80594-#ifndef __ASSEMBLY__
80595-
80596 #include <asm-generic/pgtable-nopud.h>
80597
80598-struct mm_struct;
80599-
80600 #define __PAGETABLE_PMD_FOLDED
80601
80602+#define PMD_SHIFT PUD_SHIFT
80603+#define PTRS_PER_PMD 1
80604+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
80605+#define PMD_MASK (~(PMD_SIZE-1))
80606+
80607+#ifndef __ASSEMBLY__
80608+
80609+struct mm_struct;
80610+
80611 /*
80612 * Having the pmd type consist of a pud gets the size right, and allows
80613 * us to conceptually access the pud entry that this pmd is folded into
80614@@ -16,11 +21,6 @@ struct mm_struct;
80615 */
80616 typedef struct { pud_t pud; } pmd_t;
80617
80618-#define PMD_SHIFT PUD_SHIFT
80619-#define PTRS_PER_PMD 1
80620-#define PMD_SIZE (1UL << PMD_SHIFT)
80621-#define PMD_MASK (~(PMD_SIZE-1))
80622-
80623 /*
80624 * The "pud_xxx()" functions here are trivial for a folded two-level
80625 * setup: the pmd is never bad, and a pmd always exists (as it's folded
80626diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80627index 810431d..0ec4804f 100644
80628--- a/include/asm-generic/pgtable-nopud.h
80629+++ b/include/asm-generic/pgtable-nopud.h
80630@@ -1,10 +1,15 @@
80631 #ifndef _PGTABLE_NOPUD_H
80632 #define _PGTABLE_NOPUD_H
80633
80634-#ifndef __ASSEMBLY__
80635-
80636 #define __PAGETABLE_PUD_FOLDED
80637
80638+#define PUD_SHIFT PGDIR_SHIFT
80639+#define PTRS_PER_PUD 1
80640+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80641+#define PUD_MASK (~(PUD_SIZE-1))
80642+
80643+#ifndef __ASSEMBLY__
80644+
80645 /*
80646 * Having the pud type consist of a pgd gets the size right, and allows
80647 * us to conceptually access the pgd entry that this pud is folded into
80648@@ -12,11 +17,6 @@
80649 */
80650 typedef struct { pgd_t pgd; } pud_t;
80651
80652-#define PUD_SHIFT PGDIR_SHIFT
80653-#define PTRS_PER_PUD 1
80654-#define PUD_SIZE (1UL << PUD_SHIFT)
80655-#define PUD_MASK (~(PUD_SIZE-1))
80656-
80657 /*
80658 * The "pgd_xxx()" functions here are trivial for a folded two-level
80659 * setup: the pud is never bad, and a pud always exists (as it's folded
80660@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80661 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80662
80663 #define pgd_populate(mm, pgd, pud) do { } while (0)
80664+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80665 /*
80666 * (puds are folded into pgds so this doesn't get actually called,
80667 * but the define is needed for a generic inline function.)
80668diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80669index 53b2acc..f4568e7 100644
80670--- a/include/asm-generic/pgtable.h
80671+++ b/include/asm-generic/pgtable.h
80672@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
80673 }
80674 #endif /* CONFIG_NUMA_BALANCING */
80675
80676+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80677+#ifdef CONFIG_PAX_KERNEXEC
80678+#error KERNEXEC requires pax_open_kernel
80679+#else
80680+static inline unsigned long pax_open_kernel(void) { return 0; }
80681+#endif
80682+#endif
80683+
80684+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80685+#ifdef CONFIG_PAX_KERNEXEC
80686+#error KERNEXEC requires pax_close_kernel
80687+#else
80688+static inline unsigned long pax_close_kernel(void) { return 0; }
80689+#endif
80690+#endif
80691+
80692 #endif /* CONFIG_MMU */
80693
80694 #endif /* !__ASSEMBLY__ */
80695diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80696index 72d8803..cb9749c 100644
80697--- a/include/asm-generic/uaccess.h
80698+++ b/include/asm-generic/uaccess.h
80699@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80700 return __clear_user(to, n);
80701 }
80702
80703+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80704+#ifdef CONFIG_PAX_MEMORY_UDEREF
80705+#error UDEREF requires pax_open_userland
80706+#else
80707+static inline unsigned long pax_open_userland(void) { return 0; }
80708+#endif
80709+#endif
80710+
80711+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80712+#ifdef CONFIG_PAX_MEMORY_UDEREF
80713+#error UDEREF requires pax_close_userland
80714+#else
80715+static inline unsigned long pax_close_userland(void) { return 0; }
80716+#endif
80717+#endif
80718+
80719 #endif /* __ASM_GENERIC_UACCESS_H */
80720diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80721index 5ba0360..e85c934 100644
80722--- a/include/asm-generic/vmlinux.lds.h
80723+++ b/include/asm-generic/vmlinux.lds.h
80724@@ -231,6 +231,7 @@
80725 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80726 VMLINUX_SYMBOL(__start_rodata) = .; \
80727 *(.rodata) *(.rodata.*) \
80728+ *(.data..read_only) \
80729 *(__vermagic) /* Kernel version magic */ \
80730 . = ALIGN(8); \
80731 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80732@@ -722,17 +723,18 @@
80733 * section in the linker script will go there too. @phdr should have
80734 * a leading colon.
80735 *
80736- * Note that this macros defines __per_cpu_load as an absolute symbol.
80737+ * Note that this macros defines per_cpu_load as an absolute symbol.
80738 * If there is no need to put the percpu section at a predetermined
80739 * address, use PERCPU_SECTION.
80740 */
80741 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80742- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80743- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80744+ per_cpu_load = .; \
80745+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80746 - LOAD_OFFSET) { \
80747+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80748 PERCPU_INPUT(cacheline) \
80749 } phdr \
80750- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80751+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80752
80753 /**
80754 * PERCPU_SECTION - define output section for percpu area, simple version
80755diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80756index 623a59c..1e79ab9 100644
80757--- a/include/crypto/algapi.h
80758+++ b/include/crypto/algapi.h
80759@@ -34,7 +34,7 @@ struct crypto_type {
80760 unsigned int maskclear;
80761 unsigned int maskset;
80762 unsigned int tfmsize;
80763-};
80764+} __do_const;
80765
80766 struct crypto_instance {
80767 struct crypto_alg alg;
80768diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80769index 1968907..7d9ed9f 100644
80770--- a/include/drm/drmP.h
80771+++ b/include/drm/drmP.h
80772@@ -68,6 +68,7 @@
80773 #include <linux/workqueue.h>
80774 #include <linux/poll.h>
80775 #include <asm/pgalloc.h>
80776+#include <asm/local.h>
80777 #include <drm/drm.h>
80778 #include <drm/drm_sarea.h>
80779 #include <drm/drm_vma_manager.h>
80780@@ -260,10 +261,12 @@ do { \
80781 * \param cmd command.
80782 * \param arg argument.
80783 */
80784-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80785+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80786+ struct drm_file *file_priv);
80787+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80788 struct drm_file *file_priv);
80789
80790-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80791+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80792 unsigned long arg);
80793
80794 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80795@@ -279,10 +282,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80796 struct drm_ioctl_desc {
80797 unsigned int cmd;
80798 int flags;
80799- drm_ioctl_t *func;
80800+ drm_ioctl_t func;
80801 unsigned int cmd_drv;
80802 const char *name;
80803-};
80804+} __do_const;
80805
80806 /**
80807 * Creates a driver or general drm_ioctl_desc array entry for the given
80808@@ -946,7 +949,8 @@ struct drm_info_list {
80809 int (*show)(struct seq_file*, void*); /** show callback */
80810 u32 driver_features; /**< Required driver features for this entry */
80811 void *data;
80812-};
80813+} __do_const;
80814+typedef struct drm_info_list __no_const drm_info_list_no_const;
80815
80816 /**
80817 * debugfs node structure. This structure represents a debugfs file.
80818@@ -1030,7 +1034,7 @@ struct drm_device {
80819
80820 /** \name Usage Counters */
80821 /*@{ */
80822- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80823+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80824 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80825 int buf_use; /**< Buffers in use -- cannot alloc */
80826 atomic_t buf_alloc; /**< Buffer allocation in progress */
80827diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80828index a3d75fe..6802f9c 100644
80829--- a/include/drm/drm_crtc_helper.h
80830+++ b/include/drm/drm_crtc_helper.h
80831@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
80832 struct drm_connector *connector);
80833 /* disable encoder when not in use - more explicit than dpms off */
80834 void (*disable)(struct drm_encoder *encoder);
80835-};
80836+} __no_const;
80837
80838 /**
80839 * drm_connector_helper_funcs - helper operations for connectors
80840diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80841index a70d456..6ea07cd 100644
80842--- a/include/drm/i915_pciids.h
80843+++ b/include/drm/i915_pciids.h
80844@@ -37,7 +37,7 @@
80845 */
80846 #define INTEL_VGA_DEVICE(id, info) { \
80847 0x8086, id, \
80848- ~0, ~0, \
80849+ PCI_ANY_ID, PCI_ANY_ID, \
80850 0x030000, 0xff0000, \
80851 (unsigned long) info }
80852
80853diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80854index 72dcbe8..8db58d7 100644
80855--- a/include/drm/ttm/ttm_memory.h
80856+++ b/include/drm/ttm/ttm_memory.h
80857@@ -48,7 +48,7 @@
80858
80859 struct ttm_mem_shrink {
80860 int (*do_shrink) (struct ttm_mem_shrink *);
80861-};
80862+} __no_const;
80863
80864 /**
80865 * struct ttm_mem_global - Global memory accounting structure.
80866diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80867index 49a8284..9643967 100644
80868--- a/include/drm/ttm/ttm_page_alloc.h
80869+++ b/include/drm/ttm/ttm_page_alloc.h
80870@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80871 */
80872 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80873
80874+struct device;
80875 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80876 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80877
80878diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80879index 4b840e8..155d235 100644
80880--- a/include/keys/asymmetric-subtype.h
80881+++ b/include/keys/asymmetric-subtype.h
80882@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80883 /* Verify the signature on a key of this subtype (optional) */
80884 int (*verify_signature)(const struct key *key,
80885 const struct public_key_signature *sig);
80886-};
80887+} __do_const;
80888
80889 /**
80890 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80891diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80892index c1da539..1dcec55 100644
80893--- a/include/linux/atmdev.h
80894+++ b/include/linux/atmdev.h
80895@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80896 #endif
80897
80898 struct k_atm_aal_stats {
80899-#define __HANDLE_ITEM(i) atomic_t i
80900+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80901 __AAL_STAT_ITEMS
80902 #undef __HANDLE_ITEM
80903 };
80904@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80905 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80906 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80907 struct module *owner;
80908-};
80909+} __do_const ;
80910
80911 struct atmphy_ops {
80912 int (*start)(struct atm_dev *dev);
80913diff --git a/include/linux/audit.h b/include/linux/audit.h
80914index 22cfddb..1514eef 100644
80915--- a/include/linux/audit.h
80916+++ b/include/linux/audit.h
80917@@ -86,7 +86,7 @@ extern unsigned compat_dir_class[];
80918 extern unsigned compat_chattr_class[];
80919 extern unsigned compat_signal_class[];
80920
80921-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
80922+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
80923
80924 /* audit_names->type values */
80925 #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
80926@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
80927 extern unsigned int audit_serial(void);
80928 extern int auditsc_get_stamp(struct audit_context *ctx,
80929 struct timespec *t, unsigned int *serial);
80930-extern int audit_set_loginuid(kuid_t loginuid);
80931+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80932
80933 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80934 {
80935diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80936index 61f29e5..e67c658 100644
80937--- a/include/linux/binfmts.h
80938+++ b/include/linux/binfmts.h
80939@@ -44,7 +44,7 @@ struct linux_binprm {
80940 unsigned interp_flags;
80941 unsigned interp_data;
80942 unsigned long loader, exec;
80943-};
80944+} __randomize_layout;
80945
80946 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80947 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80948@@ -73,8 +73,10 @@ struct linux_binfmt {
80949 int (*load_binary)(struct linux_binprm *);
80950 int (*load_shlib)(struct file *);
80951 int (*core_dump)(struct coredump_params *cprm);
80952+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80953+ void (*handle_mmap)(struct file *);
80954 unsigned long min_coredump; /* minimal dump size */
80955-};
80956+} __do_const __randomize_layout;
80957
80958 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80959
80960diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80961index cbc5833..8123ebc 100644
80962--- a/include/linux/bitops.h
80963+++ b/include/linux/bitops.h
80964@@ -122,7 +122,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80965 * @word: value to rotate
80966 * @shift: bits to roll
80967 */
80968-static inline __u32 rol32(__u32 word, unsigned int shift)
80969+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80970 {
80971 return (word << shift) | (word >> (32 - shift));
80972 }
80973@@ -132,7 +132,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80974 * @word: value to rotate
80975 * @shift: bits to roll
80976 */
80977-static inline __u32 ror32(__u32 word, unsigned int shift)
80978+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80979 {
80980 return (word >> shift) | (word << (32 - shift));
80981 }
80982@@ -188,7 +188,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80983 return (__s32)(value << shift) >> shift;
80984 }
80985
80986-static inline unsigned fls_long(unsigned long l)
80987+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80988 {
80989 if (sizeof(l) == 4)
80990 return fls(l);
80991diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80992index 518b465..11953e6 100644
80993--- a/include/linux/blkdev.h
80994+++ b/include/linux/blkdev.h
80995@@ -1627,7 +1627,7 @@ struct block_device_operations {
80996 /* this callback is with swap_lock and sometimes page table lock held */
80997 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80998 struct module *owner;
80999-};
81000+} __do_const;
81001
81002 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
81003 unsigned long);
81004diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
81005index afc1343..9735539 100644
81006--- a/include/linux/blktrace_api.h
81007+++ b/include/linux/blktrace_api.h
81008@@ -25,7 +25,7 @@ struct blk_trace {
81009 struct dentry *dropped_file;
81010 struct dentry *msg_file;
81011 struct list_head running_list;
81012- atomic_t dropped;
81013+ atomic_unchecked_t dropped;
81014 };
81015
81016 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
81017diff --git a/include/linux/cache.h b/include/linux/cache.h
81018index 17e7e82..1d7da26 100644
81019--- a/include/linux/cache.h
81020+++ b/include/linux/cache.h
81021@@ -16,6 +16,14 @@
81022 #define __read_mostly
81023 #endif
81024
81025+#ifndef __read_only
81026+#ifdef CONFIG_PAX_KERNEXEC
81027+#error KERNEXEC requires __read_only
81028+#else
81029+#define __read_only __read_mostly
81030+#endif
81031+#endif
81032+
81033 #ifndef ____cacheline_aligned
81034 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
81035 #endif
81036diff --git a/include/linux/capability.h b/include/linux/capability.h
81037index aa93e5e..985a1b0 100644
81038--- a/include/linux/capability.h
81039+++ b/include/linux/capability.h
81040@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
81041 extern bool capable(int cap);
81042 extern bool ns_capable(struct user_namespace *ns, int cap);
81043 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
81044+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
81045 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
81046+extern bool capable_nolog(int cap);
81047+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
81048
81049 /* audit system wants to get cap info from files as well */
81050 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
81051
81052+extern int is_privileged_binary(const struct dentry *dentry);
81053+
81054 #endif /* !_LINUX_CAPABILITY_H */
81055diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
81056index 8609d57..86e4d79 100644
81057--- a/include/linux/cdrom.h
81058+++ b/include/linux/cdrom.h
81059@@ -87,7 +87,6 @@ struct cdrom_device_ops {
81060
81061 /* driver specifications */
81062 const int capability; /* capability flags */
81063- int n_minors; /* number of active minor devices */
81064 /* handle uniform packets for scsi type devices (scsi,atapi) */
81065 int (*generic_packet) (struct cdrom_device_info *,
81066 struct packet_command *);
81067diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
81068index 4ce9056..86caac6 100644
81069--- a/include/linux/cleancache.h
81070+++ b/include/linux/cleancache.h
81071@@ -31,7 +31,7 @@ struct cleancache_ops {
81072 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
81073 void (*invalidate_inode)(int, struct cleancache_filekey);
81074 void (*invalidate_fs)(int);
81075-};
81076+} __no_const;
81077
81078 extern struct cleancache_ops *
81079 cleancache_register_ops(struct cleancache_ops *ops);
81080diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
81081index 411dd7e..ee38878 100644
81082--- a/include/linux/clk-provider.h
81083+++ b/include/linux/clk-provider.h
81084@@ -180,6 +180,7 @@ struct clk_ops {
81085 void (*init)(struct clk_hw *hw);
81086 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
81087 };
81088+typedef struct clk_ops __no_const clk_ops_no_const;
81089
81090 /**
81091 * struct clk_init_data - holds init data that's common to all clocks and is
81092diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
81093index 653f0e2..abcafaa 100644
81094--- a/include/linux/clocksource.h
81095+++ b/include/linux/clocksource.h
81096@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
81097 extern void clocksource_change_rating(struct clocksource *cs, int rating);
81098 extern void clocksource_suspend(void);
81099 extern void clocksource_resume(void);
81100-extern struct clocksource * __init __weak clocksource_default_clock(void);
81101+extern struct clocksource * __init clocksource_default_clock(void);
81102 extern void clocksource_mark_unstable(struct clocksource *cs);
81103
81104 extern u64
81105diff --git a/include/linux/compat.h b/include/linux/compat.h
81106index e649426..a74047b 100644
81107--- a/include/linux/compat.h
81108+++ b/include/linux/compat.h
81109@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
81110 compat_size_t __user *len_ptr);
81111
81112 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
81113-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
81114+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
81115 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
81116 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
81117 compat_ssize_t msgsz, int msgflg);
81118@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
81119 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
81120 compat_ulong_t addr, compat_ulong_t data);
81121 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
81122- compat_long_t addr, compat_long_t data);
81123+ compat_ulong_t addr, compat_ulong_t data);
81124
81125 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
81126 /*
81127diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
81128index 2507fd2..55203f8 100644
81129--- a/include/linux/compiler-gcc4.h
81130+++ b/include/linux/compiler-gcc4.h
81131@@ -39,9 +39,34 @@
81132 # define __compiletime_warning(message) __attribute__((warning(message)))
81133 # define __compiletime_error(message) __attribute__((error(message)))
81134 #endif /* __CHECKER__ */
81135+
81136+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
81137+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
81138+#define __bos0(ptr) __bos((ptr), 0)
81139+#define __bos1(ptr) __bos((ptr), 1)
81140 #endif /* GCC_VERSION >= 40300 */
81141
81142 #if GCC_VERSION >= 40500
81143+
81144+#ifdef RANDSTRUCT_PLUGIN
81145+#define __randomize_layout __attribute__((randomize_layout))
81146+#define __no_randomize_layout __attribute__((no_randomize_layout))
81147+#endif
81148+
81149+#ifdef CONSTIFY_PLUGIN
81150+#define __no_const __attribute__((no_const))
81151+#define __do_const __attribute__((do_const))
81152+#endif
81153+
81154+#ifdef SIZE_OVERFLOW_PLUGIN
81155+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
81156+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
81157+#endif
81158+
81159+#ifdef LATENT_ENTROPY_PLUGIN
81160+#define __latent_entropy __attribute__((latent_entropy))
81161+#endif
81162+
81163 /*
81164 * Mark a position in code as unreachable. This can be used to
81165 * suppress control flow warnings after asm blocks that transfer
81166diff --git a/include/linux/compiler.h b/include/linux/compiler.h
81167index d5ad7b1..3b74638 100644
81168--- a/include/linux/compiler.h
81169+++ b/include/linux/compiler.h
81170@@ -5,11 +5,14 @@
81171
81172 #ifdef __CHECKER__
81173 # define __user __attribute__((noderef, address_space(1)))
81174+# define __force_user __force __user
81175 # define __kernel __attribute__((address_space(0)))
81176+# define __force_kernel __force __kernel
81177 # define __safe __attribute__((safe))
81178 # define __force __attribute__((force))
81179 # define __nocast __attribute__((nocast))
81180 # define __iomem __attribute__((noderef, address_space(2)))
81181+# define __force_iomem __force __iomem
81182 # define __must_hold(x) __attribute__((context(x,1,1)))
81183 # define __acquires(x) __attribute__((context(x,0,1)))
81184 # define __releases(x) __attribute__((context(x,1,0)))
81185@@ -17,20 +20,37 @@
81186 # define __release(x) __context__(x,-1)
81187 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
81188 # define __percpu __attribute__((noderef, address_space(3)))
81189+# define __force_percpu __force __percpu
81190 #ifdef CONFIG_SPARSE_RCU_POINTER
81191 # define __rcu __attribute__((noderef, address_space(4)))
81192+# define __force_rcu __force __rcu
81193 #else
81194 # define __rcu
81195+# define __force_rcu
81196 #endif
81197 extern void __chk_user_ptr(const volatile void __user *);
81198 extern void __chk_io_ptr(const volatile void __iomem *);
81199 #else
81200-# define __user
81201-# define __kernel
81202+# ifdef CHECKER_PLUGIN
81203+//# define __user
81204+//# define __force_user
81205+//# define __kernel
81206+//# define __force_kernel
81207+# else
81208+# ifdef STRUCTLEAK_PLUGIN
81209+# define __user __attribute__((user))
81210+# else
81211+# define __user
81212+# endif
81213+# define __force_user
81214+# define __kernel
81215+# define __force_kernel
81216+# endif
81217 # define __safe
81218 # define __force
81219 # define __nocast
81220 # define __iomem
81221+# define __force_iomem
81222 # define __chk_user_ptr(x) (void)0
81223 # define __chk_io_ptr(x) (void)0
81224 # define __builtin_warning(x, y...) (1)
81225@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
81226 # define __release(x) (void)0
81227 # define __cond_lock(x,c) (c)
81228 # define __percpu
81229+# define __force_percpu
81230 # define __rcu
81231+# define __force_rcu
81232 #endif
81233
81234 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
81235@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81236 # define __attribute_const__ /* unimplemented */
81237 #endif
81238
81239+#ifndef __randomize_layout
81240+# define __randomize_layout
81241+#endif
81242+
81243+#ifndef __no_randomize_layout
81244+# define __no_randomize_layout
81245+#endif
81246+
81247+#ifndef __no_const
81248+# define __no_const
81249+#endif
81250+
81251+#ifndef __do_const
81252+# define __do_const
81253+#endif
81254+
81255+#ifndef __size_overflow
81256+# define __size_overflow(...)
81257+#endif
81258+
81259+#ifndef __intentional_overflow
81260+# define __intentional_overflow(...)
81261+#endif
81262+
81263+#ifndef __latent_entropy
81264+# define __latent_entropy
81265+#endif
81266+
81267 /*
81268 * Tell gcc if a function is cold. The compiler will assume any path
81269 * directly leading to the call is unlikely.
81270@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81271 #define __cold
81272 #endif
81273
81274+#ifndef __alloc_size
81275+#define __alloc_size(...)
81276+#endif
81277+
81278+#ifndef __bos
81279+#define __bos(ptr, arg)
81280+#endif
81281+
81282+#ifndef __bos0
81283+#define __bos0(ptr)
81284+#endif
81285+
81286+#ifndef __bos1
81287+#define __bos1(ptr)
81288+#endif
81289+
81290 /* Simple shorthand for a section definition */
81291 #ifndef __section
81292 # define __section(S) __attribute__ ((__section__(#S)))
81293@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
81294 * use is to mediate communication between process-level code and irq/NMI
81295 * handlers, all running on the same CPU.
81296 */
81297-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
81298+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
81299+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
81300
81301 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
81302 #ifdef CONFIG_KPROBES
81303diff --git a/include/linux/completion.h b/include/linux/completion.h
81304index 5d5aaae..0ea9b84 100644
81305--- a/include/linux/completion.h
81306+++ b/include/linux/completion.h
81307@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
81308
81309 extern void wait_for_completion(struct completion *);
81310 extern void wait_for_completion_io(struct completion *);
81311-extern int wait_for_completion_interruptible(struct completion *x);
81312-extern int wait_for_completion_killable(struct completion *x);
81313+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
81314+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
81315 extern unsigned long wait_for_completion_timeout(struct completion *x,
81316- unsigned long timeout);
81317+ unsigned long timeout) __intentional_overflow(-1);
81318 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
81319- unsigned long timeout);
81320+ unsigned long timeout) __intentional_overflow(-1);
81321 extern long wait_for_completion_interruptible_timeout(
81322- struct completion *x, unsigned long timeout);
81323+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81324 extern long wait_for_completion_killable_timeout(
81325- struct completion *x, unsigned long timeout);
81326+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
81327 extern bool try_wait_for_completion(struct completion *x);
81328 extern bool completion_done(struct completion *x);
81329
81330diff --git a/include/linux/configfs.h b/include/linux/configfs.h
81331index 34025df..d94bbbc 100644
81332--- a/include/linux/configfs.h
81333+++ b/include/linux/configfs.h
81334@@ -125,7 +125,7 @@ struct configfs_attribute {
81335 const char *ca_name;
81336 struct module *ca_owner;
81337 umode_t ca_mode;
81338-};
81339+} __do_const;
81340
81341 /*
81342 * Users often need to create attribute structures for their configurable
81343diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
81344index 7d1955a..d86a3ca 100644
81345--- a/include/linux/cpufreq.h
81346+++ b/include/linux/cpufreq.h
81347@@ -203,6 +203,7 @@ struct global_attr {
81348 ssize_t (*store)(struct kobject *a, struct attribute *b,
81349 const char *c, size_t count);
81350 };
81351+typedef struct global_attr __no_const global_attr_no_const;
81352
81353 #define define_one_global_ro(_name) \
81354 static struct global_attr _name = \
81355@@ -269,7 +270,7 @@ struct cpufreq_driver {
81356 bool boost_supported;
81357 bool boost_enabled;
81358 int (*set_boost) (int state);
81359-};
81360+} __do_const;
81361
81362 /* flags */
81363 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
81364diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
81365index 25e0df6..952dffd 100644
81366--- a/include/linux/cpuidle.h
81367+++ b/include/linux/cpuidle.h
81368@@ -50,7 +50,8 @@ struct cpuidle_state {
81369 int index);
81370
81371 int (*enter_dead) (struct cpuidle_device *dev, int index);
81372-};
81373+} __do_const;
81374+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
81375
81376 /* Idle State Flags */
81377 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
81378@@ -209,7 +210,7 @@ struct cpuidle_governor {
81379 void (*reflect) (struct cpuidle_device *dev, int index);
81380
81381 struct module *owner;
81382-};
81383+} __do_const;
81384
81385 #ifdef CONFIG_CPU_IDLE
81386 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
81387diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
81388index 2997af6..424ddc1 100644
81389--- a/include/linux/cpumask.h
81390+++ b/include/linux/cpumask.h
81391@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81392 }
81393
81394 /* Valid inputs for n are -1 and 0. */
81395-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81396+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81397 {
81398 return n+1;
81399 }
81400
81401-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81402+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81403 {
81404 return n+1;
81405 }
81406
81407-static inline unsigned int cpumask_next_and(int n,
81408+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
81409 const struct cpumask *srcp,
81410 const struct cpumask *andp)
81411 {
81412@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
81413 *
81414 * Returns >= nr_cpu_ids if no further cpus set.
81415 */
81416-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81417+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
81418 {
81419 /* -1 is a legal arg here. */
81420 if (n != -1)
81421@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
81422 *
81423 * Returns >= nr_cpu_ids if no further cpus unset.
81424 */
81425-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81426+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
81427 {
81428 /* -1 is a legal arg here. */
81429 if (n != -1)
81430@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
81431 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
81432 }
81433
81434-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
81435+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
81436 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
81437 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
81438
81439diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
81440index 72ab536..3849fce 100644
81441--- a/include/linux/crash_dump.h
81442+++ b/include/linux/crash_dump.h
81443@@ -14,14 +14,13 @@
81444 extern unsigned long long elfcorehdr_addr;
81445 extern unsigned long long elfcorehdr_size;
81446
81447-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
81448- unsigned long long *size);
81449-extern void __weak elfcorehdr_free(unsigned long long addr);
81450-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
81451-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
81452-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
81453- unsigned long from, unsigned long pfn,
81454- unsigned long size, pgprot_t prot);
81455+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
81456+extern void elfcorehdr_free(unsigned long long addr);
81457+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
81458+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
81459+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
81460+ unsigned long from, unsigned long pfn,
81461+ unsigned long size, pgprot_t prot);
81462
81463 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
81464 unsigned long, int);
81465diff --git a/include/linux/cred.h b/include/linux/cred.h
81466index b2d0820..2ecafd3 100644
81467--- a/include/linux/cred.h
81468+++ b/include/linux/cred.h
81469@@ -35,7 +35,7 @@ struct group_info {
81470 int nblocks;
81471 kgid_t small_block[NGROUPS_SMALL];
81472 kgid_t *blocks[0];
81473-};
81474+} __randomize_layout;
81475
81476 /**
81477 * get_group_info - Get a reference to a group info structure
81478@@ -136,7 +136,7 @@ struct cred {
81479 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
81480 struct group_info *group_info; /* supplementary groups for euid/fsgid */
81481 struct rcu_head rcu; /* RCU deletion hook */
81482-};
81483+} __randomize_layout;
81484
81485 extern void __put_cred(struct cred *);
81486 extern void exit_creds(struct task_struct *);
81487@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
81488 static inline void validate_process_creds(void)
81489 {
81490 }
81491+static inline void validate_task_creds(struct task_struct *task)
81492+{
81493+}
81494 #endif
81495
81496 /**
81497@@ -331,6 +334,7 @@ static inline void put_cred(const struct cred *_cred)
81498
81499 #define task_uid(task) (task_cred_xxx((task), uid))
81500 #define task_euid(task) (task_cred_xxx((task), euid))
81501+#define task_securebits(task) (task_cred_xxx((task), securebits))
81502
81503 #define current_cred_xxx(xxx) \
81504 ({ \
81505diff --git a/include/linux/crypto.h b/include/linux/crypto.h
81506index d45e949..51cf5ea 100644
81507--- a/include/linux/crypto.h
81508+++ b/include/linux/crypto.h
81509@@ -373,7 +373,7 @@ struct cipher_tfm {
81510 const u8 *key, unsigned int keylen);
81511 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81512 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
81513-};
81514+} __no_const;
81515
81516 struct hash_tfm {
81517 int (*init)(struct hash_desc *desc);
81518@@ -394,13 +394,13 @@ struct compress_tfm {
81519 int (*cot_decompress)(struct crypto_tfm *tfm,
81520 const u8 *src, unsigned int slen,
81521 u8 *dst, unsigned int *dlen);
81522-};
81523+} __no_const;
81524
81525 struct rng_tfm {
81526 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
81527 unsigned int dlen);
81528 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
81529-};
81530+} __no_const;
81531
81532 #define crt_ablkcipher crt_u.ablkcipher
81533 #define crt_aead crt_u.aead
81534diff --git a/include/linux/ctype.h b/include/linux/ctype.h
81535index 653589e..4ef254a 100644
81536--- a/include/linux/ctype.h
81537+++ b/include/linux/ctype.h
81538@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
81539 * Fast implementation of tolower() for internal usage. Do not use in your
81540 * code.
81541 */
81542-static inline char _tolower(const char c)
81543+static inline unsigned char _tolower(const unsigned char c)
81544 {
81545 return c | 0x20;
81546 }
81547diff --git a/include/linux/dcache.h b/include/linux/dcache.h
81548index 75a227c..1456987 100644
81549--- a/include/linux/dcache.h
81550+++ b/include/linux/dcache.h
81551@@ -134,7 +134,7 @@ struct dentry {
81552 } d_u;
81553 struct list_head d_subdirs; /* our children */
81554 struct hlist_node d_alias; /* inode alias list */
81555-};
81556+} __randomize_layout;
81557
81558 /*
81559 * dentry->d_lock spinlock nesting subclasses:
81560diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81561index 7925bf0..d5143d2 100644
81562--- a/include/linux/decompress/mm.h
81563+++ b/include/linux/decompress/mm.h
81564@@ -77,7 +77,7 @@ static void free(void *where)
81565 * warnings when not needed (indeed large_malloc / large_free are not
81566 * needed by inflate */
81567
81568-#define malloc(a) kmalloc(a, GFP_KERNEL)
81569+#define malloc(a) kmalloc((a), GFP_KERNEL)
81570 #define free(a) kfree(a)
81571
81572 #define large_malloc(a) vmalloc(a)
81573diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81574index f1863dc..5c26074 100644
81575--- a/include/linux/devfreq.h
81576+++ b/include/linux/devfreq.h
81577@@ -114,7 +114,7 @@ struct devfreq_governor {
81578 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81579 int (*event_handler)(struct devfreq *devfreq,
81580 unsigned int event, void *data);
81581-};
81582+} __do_const;
81583
81584 /**
81585 * struct devfreq - Device devfreq structure
81586diff --git a/include/linux/device.h b/include/linux/device.h
81587index 43d183a..03b6ba2 100644
81588--- a/include/linux/device.h
81589+++ b/include/linux/device.h
81590@@ -310,7 +310,7 @@ struct subsys_interface {
81591 struct list_head node;
81592 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81593 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81594-};
81595+} __do_const;
81596
81597 int subsys_interface_register(struct subsys_interface *sif);
81598 void subsys_interface_unregister(struct subsys_interface *sif);
81599@@ -506,7 +506,7 @@ struct device_type {
81600 void (*release)(struct device *dev);
81601
81602 const struct dev_pm_ops *pm;
81603-};
81604+} __do_const;
81605
81606 /* interface for exporting device attributes */
81607 struct device_attribute {
81608@@ -516,11 +516,12 @@ struct device_attribute {
81609 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81610 const char *buf, size_t count);
81611 };
81612+typedef struct device_attribute __no_const device_attribute_no_const;
81613
81614 struct dev_ext_attribute {
81615 struct device_attribute attr;
81616 void *var;
81617-};
81618+} __do_const;
81619
81620 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81621 char *buf);
81622diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81623index 931b709..89b2d89 100644
81624--- a/include/linux/dma-mapping.h
81625+++ b/include/linux/dma-mapping.h
81626@@ -60,7 +60,7 @@ struct dma_map_ops {
81627 u64 (*get_required_mask)(struct device *dev);
81628 #endif
81629 int is_phys;
81630-};
81631+} __do_const;
81632
81633 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81634
81635diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81636index 1f9e642..39e4263 100644
81637--- a/include/linux/dmaengine.h
81638+++ b/include/linux/dmaengine.h
81639@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
81640 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81641 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81642
81643-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81644+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81645 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81646-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81647+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81648 struct dma_pinned_list *pinned_list, struct page *page,
81649 unsigned int offset, size_t len);
81650
81651diff --git a/include/linux/efi.h b/include/linux/efi.h
81652index 45cb4ff..c9b4912 100644
81653--- a/include/linux/efi.h
81654+++ b/include/linux/efi.h
81655@@ -1036,6 +1036,7 @@ struct efivar_operations {
81656 efi_set_variable_t *set_variable;
81657 efi_query_variable_store_t *query_variable_store;
81658 };
81659+typedef struct efivar_operations __no_const efivar_operations_no_const;
81660
81661 struct efivars {
81662 /*
81663diff --git a/include/linux/elf.h b/include/linux/elf.h
81664index 67a5fa7..b817372 100644
81665--- a/include/linux/elf.h
81666+++ b/include/linux/elf.h
81667@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
81668 #define elf_note elf32_note
81669 #define elf_addr_t Elf32_Off
81670 #define Elf_Half Elf32_Half
81671+#define elf_dyn Elf32_Dyn
81672
81673 #else
81674
81675@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
81676 #define elf_note elf64_note
81677 #define elf_addr_t Elf64_Off
81678 #define Elf_Half Elf64_Half
81679+#define elf_dyn Elf64_Dyn
81680
81681 #endif
81682
81683diff --git a/include/linux/err.h b/include/linux/err.h
81684index a729120..6ede2c9 100644
81685--- a/include/linux/err.h
81686+++ b/include/linux/err.h
81687@@ -20,12 +20,12 @@
81688
81689 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81690
81691-static inline void * __must_check ERR_PTR(long error)
81692+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81693 {
81694 return (void *) error;
81695 }
81696
81697-static inline long __must_check PTR_ERR(__force const void *ptr)
81698+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81699 {
81700 return (long) ptr;
81701 }
81702diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81703index 36f49c4..a2a1f4c 100644
81704--- a/include/linux/extcon.h
81705+++ b/include/linux/extcon.h
81706@@ -135,7 +135,7 @@ struct extcon_dev {
81707 /* /sys/class/extcon/.../mutually_exclusive/... */
81708 struct attribute_group attr_g_muex;
81709 struct attribute **attrs_muex;
81710- struct device_attribute *d_attrs_muex;
81711+ device_attribute_no_const *d_attrs_muex;
81712 };
81713
81714 /**
81715diff --git a/include/linux/fb.h b/include/linux/fb.h
81716index 09bb7a1..d98870a 100644
81717--- a/include/linux/fb.h
81718+++ b/include/linux/fb.h
81719@@ -305,7 +305,7 @@ struct fb_ops {
81720 /* called at KDB enter and leave time to prepare the console */
81721 int (*fb_debug_enter)(struct fb_info *info);
81722 int (*fb_debug_leave)(struct fb_info *info);
81723-};
81724+} __do_const;
81725
81726 #ifdef CONFIG_FB_TILEBLITTING
81727 #define FB_TILE_CURSOR_NONE 0
81728diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81729index 230f87b..1fd0485 100644
81730--- a/include/linux/fdtable.h
81731+++ b/include/linux/fdtable.h
81732@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81733 void put_files_struct(struct files_struct *fs);
81734 void reset_files_struct(struct files_struct *);
81735 int unshare_files(struct files_struct **);
81736-struct files_struct *dup_fd(struct files_struct *, int *);
81737+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81738 void do_close_on_exec(struct files_struct *);
81739 int iterate_fd(struct files_struct *, unsigned,
81740 int (*)(const void *, struct file *, unsigned),
81741diff --git a/include/linux/filter.h b/include/linux/filter.h
81742index a5227ab..c789945 100644
81743--- a/include/linux/filter.h
81744+++ b/include/linux/filter.h
81745@@ -9,6 +9,11 @@
81746 #include <linux/skbuff.h>
81747 #include <linux/workqueue.h>
81748 #include <uapi/linux/filter.h>
81749+#include <asm/cacheflush.h>
81750+
81751+struct sk_buff;
81752+struct sock;
81753+struct seccomp_data;
81754
81755 /* Internally used and optimized filter representation with extended
81756 * instruction set based on top of classic BPF.
81757@@ -320,20 +325,23 @@ struct sock_fprog_kern {
81758 struct sock_filter *filter;
81759 };
81760
81761-struct sk_buff;
81762-struct sock;
81763-struct seccomp_data;
81764+struct bpf_work_struct {
81765+ struct bpf_prog *prog;
81766+ struct work_struct work;
81767+};
81768
81769 struct bpf_prog {
81770+ u32 pages; /* Number of allocated pages */
81771 u32 jited:1, /* Is our filter JIT'ed? */
81772 len:31; /* Number of filter blocks */
81773 struct sock_fprog_kern *orig_prog; /* Original BPF program */
81774+ struct bpf_work_struct *work; /* Deferred free work struct */
81775 unsigned int (*bpf_func)(const struct sk_buff *skb,
81776 const struct bpf_insn *filter);
81777+ /* Instructions for interpreter */
81778 union {
81779 struct sock_filter insns[0];
81780 struct bpf_insn insnsi[0];
81781- struct work_struct work;
81782 };
81783 };
81784
81785@@ -353,6 +361,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
81786
81787 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
81788
81789+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
81790+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81791+{
81792+ set_memory_ro((unsigned long)fp, fp->pages);
81793+}
81794+
81795+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81796+{
81797+ set_memory_rw((unsigned long)fp, fp->pages);
81798+}
81799+#else
81800+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81801+{
81802+}
81803+
81804+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81805+{
81806+}
81807+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
81808+
81809 int sk_filter(struct sock *sk, struct sk_buff *skb);
81810
81811 void bpf_prog_select_runtime(struct bpf_prog *fp);
81812@@ -361,6 +389,17 @@ void bpf_prog_free(struct bpf_prog *fp);
81813 int bpf_convert_filter(struct sock_filter *prog, int len,
81814 struct bpf_insn *new_prog, int *new_len);
81815
81816+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
81817+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
81818+ gfp_t gfp_extra_flags);
81819+void __bpf_prog_free(struct bpf_prog *fp);
81820+
81821+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
81822+{
81823+ bpf_prog_unlock_ro(fp);
81824+ __bpf_prog_free(fp);
81825+}
81826+
81827 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
81828 void bpf_prog_destroy(struct bpf_prog *fp);
81829
81830@@ -450,7 +489,7 @@ static inline void bpf_jit_compile(struct bpf_prog *fp)
81831
81832 static inline void bpf_jit_free(struct bpf_prog *fp)
81833 {
81834- kfree(fp);
81835+ bpf_prog_unlock_free(fp);
81836 }
81837 #endif /* CONFIG_BPF_JIT */
81838
81839diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81840index 8293262..2b3b8bd 100644
81841--- a/include/linux/frontswap.h
81842+++ b/include/linux/frontswap.h
81843@@ -11,7 +11,7 @@ struct frontswap_ops {
81844 int (*load)(unsigned, pgoff_t, struct page *);
81845 void (*invalidate_page)(unsigned, pgoff_t);
81846 void (*invalidate_area)(unsigned);
81847-};
81848+} __no_const;
81849
81850 extern bool frontswap_enabled;
81851 extern struct frontswap_ops *
81852diff --git a/include/linux/fs.h b/include/linux/fs.h
81853index 9418772..0155807 100644
81854--- a/include/linux/fs.h
81855+++ b/include/linux/fs.h
81856@@ -401,7 +401,7 @@ struct address_space {
81857 spinlock_t private_lock; /* for use by the address_space */
81858 struct list_head private_list; /* ditto */
81859 void *private_data; /* ditto */
81860-} __attribute__((aligned(sizeof(long))));
81861+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81862 /*
81863 * On most architectures that alignment is already the case; but
81864 * must be enforced here for CRIS, to let the least significant bit
81865@@ -444,7 +444,7 @@ struct block_device {
81866 int bd_fsfreeze_count;
81867 /* Mutex for freeze */
81868 struct mutex bd_fsfreeze_mutex;
81869-};
81870+} __randomize_layout;
81871
81872 /*
81873 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81874@@ -613,7 +613,7 @@ struct inode {
81875 #endif
81876
81877 void *i_private; /* fs or device private pointer */
81878-};
81879+} __randomize_layout;
81880
81881 static inline int inode_unhashed(struct inode *inode)
81882 {
81883@@ -806,7 +806,7 @@ struct file {
81884 struct list_head f_tfile_llink;
81885 #endif /* #ifdef CONFIG_EPOLL */
81886 struct address_space *f_mapping;
81887-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81888+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81889
81890 struct file_handle {
81891 __u32 handle_bytes;
81892@@ -934,7 +934,7 @@ struct file_lock {
81893 int state; /* state of grant or error if -ve */
81894 } afs;
81895 } fl_u;
81896-};
81897+} __randomize_layout;
81898
81899 /* The following constant reflects the upper bound of the file/locking space */
81900 #ifndef OFFSET_MAX
81901@@ -1284,7 +1284,7 @@ struct super_block {
81902 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
81903 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
81904 struct rcu_head rcu;
81905-};
81906+} __randomize_layout;
81907
81908 extern struct timespec current_fs_time(struct super_block *sb);
81909
81910@@ -1510,7 +1510,8 @@ struct file_operations {
81911 long (*fallocate)(struct file *file, int mode, loff_t offset,
81912 loff_t len);
81913 int (*show_fdinfo)(struct seq_file *m, struct file *f);
81914-};
81915+} __do_const __randomize_layout;
81916+typedef struct file_operations __no_const file_operations_no_const;
81917
81918 struct inode_operations {
81919 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81920@@ -2796,4 +2797,14 @@ static inline bool dir_relax(struct inode *inode)
81921 return !IS_DEADDIR(inode);
81922 }
81923
81924+static inline bool is_sidechannel_device(const struct inode *inode)
81925+{
81926+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81927+ umode_t mode = inode->i_mode;
81928+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81929+#else
81930+ return false;
81931+#endif
81932+}
81933+
81934 #endif /* _LINUX_FS_H */
81935diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81936index 0efc3e6..fd23610 100644
81937--- a/include/linux/fs_struct.h
81938+++ b/include/linux/fs_struct.h
81939@@ -6,13 +6,13 @@
81940 #include <linux/seqlock.h>
81941
81942 struct fs_struct {
81943- int users;
81944+ atomic_t users;
81945 spinlock_t lock;
81946 seqcount_t seq;
81947 int umask;
81948 int in_exec;
81949 struct path root, pwd;
81950-};
81951+} __randomize_layout;
81952
81953 extern struct kmem_cache *fs_cachep;
81954
81955diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81956index 7714849..a4a5c7a 100644
81957--- a/include/linux/fscache-cache.h
81958+++ b/include/linux/fscache-cache.h
81959@@ -113,7 +113,7 @@ struct fscache_operation {
81960 fscache_operation_release_t release;
81961 };
81962
81963-extern atomic_t fscache_op_debug_id;
81964+extern atomic_unchecked_t fscache_op_debug_id;
81965 extern void fscache_op_work_func(struct work_struct *work);
81966
81967 extern void fscache_enqueue_operation(struct fscache_operation *);
81968@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81969 INIT_WORK(&op->work, fscache_op_work_func);
81970 atomic_set(&op->usage, 1);
81971 op->state = FSCACHE_OP_ST_INITIALISED;
81972- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81973+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81974 op->processor = processor;
81975 op->release = release;
81976 INIT_LIST_HEAD(&op->pend_link);
81977diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81978index 115bb81..e7b812b 100644
81979--- a/include/linux/fscache.h
81980+++ b/include/linux/fscache.h
81981@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81982 * - this is mandatory for any object that may have data
81983 */
81984 void (*now_uncached)(void *cookie_netfs_data);
81985-};
81986+} __do_const;
81987
81988 /*
81989 * fscache cached network filesystem type
81990diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81991index 1c804b0..1432c2b 100644
81992--- a/include/linux/fsnotify.h
81993+++ b/include/linux/fsnotify.h
81994@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
81995 struct inode *inode = file_inode(file);
81996 __u32 mask = FS_ACCESS;
81997
81998+ if (is_sidechannel_device(inode))
81999+ return;
82000+
82001 if (S_ISDIR(inode->i_mode))
82002 mask |= FS_ISDIR;
82003
82004@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
82005 struct inode *inode = file_inode(file);
82006 __u32 mask = FS_MODIFY;
82007
82008+ if (is_sidechannel_device(inode))
82009+ return;
82010+
82011 if (S_ISDIR(inode->i_mode))
82012 mask |= FS_ISDIR;
82013
82014@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
82015 */
82016 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
82017 {
82018- return kstrdup(name, GFP_KERNEL);
82019+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
82020 }
82021
82022 /*
82023diff --git a/include/linux/genhd.h b/include/linux/genhd.h
82024index ec274e0..e678159 100644
82025--- a/include/linux/genhd.h
82026+++ b/include/linux/genhd.h
82027@@ -194,7 +194,7 @@ struct gendisk {
82028 struct kobject *slave_dir;
82029
82030 struct timer_rand_state *random;
82031- atomic_t sync_io; /* RAID */
82032+ atomic_unchecked_t sync_io; /* RAID */
82033 struct disk_events *ev;
82034 #ifdef CONFIG_BLK_DEV_INTEGRITY
82035 struct blk_integrity *integrity;
82036@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
82037 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
82038
82039 /* drivers/char/random.c */
82040-extern void add_disk_randomness(struct gendisk *disk);
82041+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
82042 extern void rand_initialize_disk(struct gendisk *disk);
82043
82044 static inline sector_t get_start_sect(struct block_device *bdev)
82045diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
82046index c0894dd..2fbf10c 100644
82047--- a/include/linux/genl_magic_func.h
82048+++ b/include/linux/genl_magic_func.h
82049@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
82050 },
82051
82052 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
82053-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
82054+static struct genl_ops ZZZ_genl_ops[] = {
82055 #include GENL_MAGIC_INCLUDE_FILE
82056 };
82057
82058diff --git a/include/linux/gfp.h b/include/linux/gfp.h
82059index 5e7219d..b1ed627 100644
82060--- a/include/linux/gfp.h
82061+++ b/include/linux/gfp.h
82062@@ -34,6 +34,13 @@ struct vm_area_struct;
82063 #define ___GFP_NO_KSWAPD 0x400000u
82064 #define ___GFP_OTHER_NODE 0x800000u
82065 #define ___GFP_WRITE 0x1000000u
82066+
82067+#ifdef CONFIG_PAX_USERCOPY_SLABS
82068+#define ___GFP_USERCOPY 0x2000000u
82069+#else
82070+#define ___GFP_USERCOPY 0
82071+#endif
82072+
82073 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
82074
82075 /*
82076@@ -90,6 +97,7 @@ struct vm_area_struct;
82077 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
82078 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
82079 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
82080+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
82081
82082 /*
82083 * This may seem redundant, but it's a way of annotating false positives vs.
82084@@ -97,7 +105,7 @@ struct vm_area_struct;
82085 */
82086 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
82087
82088-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
82089+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
82090 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
82091
82092 /* This equals 0, but use constants in case they ever change */
82093@@ -155,6 +163,8 @@ struct vm_area_struct;
82094 /* 4GB DMA on some platforms */
82095 #define GFP_DMA32 __GFP_DMA32
82096
82097+#define GFP_USERCOPY __GFP_USERCOPY
82098+
82099 /* Convert GFP flags to their corresponding migrate type */
82100 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
82101 {
82102diff --git a/include/linux/gracl.h b/include/linux/gracl.h
82103new file mode 100644
82104index 0000000..edb2cb6
82105--- /dev/null
82106+++ b/include/linux/gracl.h
82107@@ -0,0 +1,340 @@
82108+#ifndef GR_ACL_H
82109+#define GR_ACL_H
82110+
82111+#include <linux/grdefs.h>
82112+#include <linux/resource.h>
82113+#include <linux/capability.h>
82114+#include <linux/dcache.h>
82115+#include <asm/resource.h>
82116+
82117+/* Major status information */
82118+
82119+#define GR_VERSION "grsecurity 3.0"
82120+#define GRSECURITY_VERSION 0x3000
82121+
82122+enum {
82123+ GR_SHUTDOWN = 0,
82124+ GR_ENABLE = 1,
82125+ GR_SPROLE = 2,
82126+ GR_OLDRELOAD = 3,
82127+ GR_SEGVMOD = 4,
82128+ GR_STATUS = 5,
82129+ GR_UNSPROLE = 6,
82130+ GR_PASSSET = 7,
82131+ GR_SPROLEPAM = 8,
82132+ GR_RELOAD = 9,
82133+};
82134+
82135+/* Password setup definitions
82136+ * kernel/grhash.c */
82137+enum {
82138+ GR_PW_LEN = 128,
82139+ GR_SALT_LEN = 16,
82140+ GR_SHA_LEN = 32,
82141+};
82142+
82143+enum {
82144+ GR_SPROLE_LEN = 64,
82145+};
82146+
82147+enum {
82148+ GR_NO_GLOB = 0,
82149+ GR_REG_GLOB,
82150+ GR_CREATE_GLOB
82151+};
82152+
82153+#define GR_NLIMITS 32
82154+
82155+/* Begin Data Structures */
82156+
82157+struct sprole_pw {
82158+ unsigned char *rolename;
82159+ unsigned char salt[GR_SALT_LEN];
82160+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
82161+};
82162+
82163+struct name_entry {
82164+ __u32 key;
82165+ ino_t inode;
82166+ dev_t device;
82167+ char *name;
82168+ __u16 len;
82169+ __u8 deleted;
82170+ struct name_entry *prev;
82171+ struct name_entry *next;
82172+};
82173+
82174+struct inodev_entry {
82175+ struct name_entry *nentry;
82176+ struct inodev_entry *prev;
82177+ struct inodev_entry *next;
82178+};
82179+
82180+struct acl_role_db {
82181+ struct acl_role_label **r_hash;
82182+ __u32 r_size;
82183+};
82184+
82185+struct inodev_db {
82186+ struct inodev_entry **i_hash;
82187+ __u32 i_size;
82188+};
82189+
82190+struct name_db {
82191+ struct name_entry **n_hash;
82192+ __u32 n_size;
82193+};
82194+
82195+struct crash_uid {
82196+ uid_t uid;
82197+ unsigned long expires;
82198+};
82199+
82200+struct gr_hash_struct {
82201+ void **table;
82202+ void **nametable;
82203+ void *first;
82204+ __u32 table_size;
82205+ __u32 used_size;
82206+ int type;
82207+};
82208+
82209+/* Userspace Grsecurity ACL data structures */
82210+
82211+struct acl_subject_label {
82212+ char *filename;
82213+ ino_t inode;
82214+ dev_t device;
82215+ __u32 mode;
82216+ kernel_cap_t cap_mask;
82217+ kernel_cap_t cap_lower;
82218+ kernel_cap_t cap_invert_audit;
82219+
82220+ struct rlimit res[GR_NLIMITS];
82221+ __u32 resmask;
82222+
82223+ __u8 user_trans_type;
82224+ __u8 group_trans_type;
82225+ uid_t *user_transitions;
82226+ gid_t *group_transitions;
82227+ __u16 user_trans_num;
82228+ __u16 group_trans_num;
82229+
82230+ __u32 sock_families[2];
82231+ __u32 ip_proto[8];
82232+ __u32 ip_type;
82233+ struct acl_ip_label **ips;
82234+ __u32 ip_num;
82235+ __u32 inaddr_any_override;
82236+
82237+ __u32 crashes;
82238+ unsigned long expires;
82239+
82240+ struct acl_subject_label *parent_subject;
82241+ struct gr_hash_struct *hash;
82242+ struct acl_subject_label *prev;
82243+ struct acl_subject_label *next;
82244+
82245+ struct acl_object_label **obj_hash;
82246+ __u32 obj_hash_size;
82247+ __u16 pax_flags;
82248+};
82249+
82250+struct role_allowed_ip {
82251+ __u32 addr;
82252+ __u32 netmask;
82253+
82254+ struct role_allowed_ip *prev;
82255+ struct role_allowed_ip *next;
82256+};
82257+
82258+struct role_transition {
82259+ char *rolename;
82260+
82261+ struct role_transition *prev;
82262+ struct role_transition *next;
82263+};
82264+
82265+struct acl_role_label {
82266+ char *rolename;
82267+ uid_t uidgid;
82268+ __u16 roletype;
82269+
82270+ __u16 auth_attempts;
82271+ unsigned long expires;
82272+
82273+ struct acl_subject_label *root_label;
82274+ struct gr_hash_struct *hash;
82275+
82276+ struct acl_role_label *prev;
82277+ struct acl_role_label *next;
82278+
82279+ struct role_transition *transitions;
82280+ struct role_allowed_ip *allowed_ips;
82281+ uid_t *domain_children;
82282+ __u16 domain_child_num;
82283+
82284+ umode_t umask;
82285+
82286+ struct acl_subject_label **subj_hash;
82287+ __u32 subj_hash_size;
82288+};
82289+
82290+struct user_acl_role_db {
82291+ struct acl_role_label **r_table;
82292+ __u32 num_pointers; /* Number of allocations to track */
82293+ __u32 num_roles; /* Number of roles */
82294+ __u32 num_domain_children; /* Number of domain children */
82295+ __u32 num_subjects; /* Number of subjects */
82296+ __u32 num_objects; /* Number of objects */
82297+};
82298+
82299+struct acl_object_label {
82300+ char *filename;
82301+ ino_t inode;
82302+ dev_t device;
82303+ __u32 mode;
82304+
82305+ struct acl_subject_label *nested;
82306+ struct acl_object_label *globbed;
82307+
82308+ /* next two structures not used */
82309+
82310+ struct acl_object_label *prev;
82311+ struct acl_object_label *next;
82312+};
82313+
82314+struct acl_ip_label {
82315+ char *iface;
82316+ __u32 addr;
82317+ __u32 netmask;
82318+ __u16 low, high;
82319+ __u8 mode;
82320+ __u32 type;
82321+ __u32 proto[8];
82322+
82323+ /* next two structures not used */
82324+
82325+ struct acl_ip_label *prev;
82326+ struct acl_ip_label *next;
82327+};
82328+
82329+struct gr_arg {
82330+ struct user_acl_role_db role_db;
82331+ unsigned char pw[GR_PW_LEN];
82332+ unsigned char salt[GR_SALT_LEN];
82333+ unsigned char sum[GR_SHA_LEN];
82334+ unsigned char sp_role[GR_SPROLE_LEN];
82335+ struct sprole_pw *sprole_pws;
82336+ dev_t segv_device;
82337+ ino_t segv_inode;
82338+ uid_t segv_uid;
82339+ __u16 num_sprole_pws;
82340+ __u16 mode;
82341+};
82342+
82343+struct gr_arg_wrapper {
82344+ struct gr_arg *arg;
82345+ __u32 version;
82346+ __u32 size;
82347+};
82348+
82349+struct subject_map {
82350+ struct acl_subject_label *user;
82351+ struct acl_subject_label *kernel;
82352+ struct subject_map *prev;
82353+ struct subject_map *next;
82354+};
82355+
82356+struct acl_subj_map_db {
82357+ struct subject_map **s_hash;
82358+ __u32 s_size;
82359+};
82360+
82361+struct gr_policy_state {
82362+ struct sprole_pw **acl_special_roles;
82363+ __u16 num_sprole_pws;
82364+ struct acl_role_label *kernel_role;
82365+ struct acl_role_label *role_list;
82366+ struct acl_role_label *default_role;
82367+ struct acl_role_db acl_role_set;
82368+ struct acl_subj_map_db subj_map_set;
82369+ struct name_db name_set;
82370+ struct inodev_db inodev_set;
82371+};
82372+
82373+struct gr_alloc_state {
82374+ unsigned long alloc_stack_next;
82375+ unsigned long alloc_stack_size;
82376+ void **alloc_stack;
82377+};
82378+
82379+struct gr_reload_state {
82380+ struct gr_policy_state oldpolicy;
82381+ struct gr_alloc_state oldalloc;
82382+ struct gr_policy_state newpolicy;
82383+ struct gr_alloc_state newalloc;
82384+ struct gr_policy_state *oldpolicy_ptr;
82385+ struct gr_alloc_state *oldalloc_ptr;
82386+ unsigned char oldmode;
82387+};
82388+
82389+/* End Data Structures Section */
82390+
82391+/* Hash functions generated by empirical testing by Brad Spengler
82392+ Makes good use of the low bits of the inode. Generally 0-1 times
82393+ in loop for successful match. 0-3 for unsuccessful match.
82394+ Shift/add algorithm with modulus of table size and an XOR*/
82395+
82396+static __inline__ unsigned int
82397+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
82398+{
82399+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
82400+}
82401+
82402+ static __inline__ unsigned int
82403+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
82404+{
82405+ return ((const unsigned long)userp % sz);
82406+}
82407+
82408+static __inline__ unsigned int
82409+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
82410+{
82411+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
82412+}
82413+
82414+static __inline__ unsigned int
82415+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
82416+{
82417+ return full_name_hash((const unsigned char *)name, len) % sz;
82418+}
82419+
82420+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
82421+ subj = NULL; \
82422+ iter = 0; \
82423+ while (iter < role->subj_hash_size) { \
82424+ if (subj == NULL) \
82425+ subj = role->subj_hash[iter]; \
82426+ if (subj == NULL) { \
82427+ iter++; \
82428+ continue; \
82429+ }
82430+
82431+#define FOR_EACH_SUBJECT_END(subj,iter) \
82432+ subj = subj->next; \
82433+ if (subj == NULL) \
82434+ iter++; \
82435+ }
82436+
82437+
82438+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
82439+ subj = role->hash->first; \
82440+ while (subj != NULL) {
82441+
82442+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
82443+ subj = subj->next; \
82444+ }
82445+
82446+#endif
82447+
82448diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
82449new file mode 100644
82450index 0000000..33ebd1f
82451--- /dev/null
82452+++ b/include/linux/gracl_compat.h
82453@@ -0,0 +1,156 @@
82454+#ifndef GR_ACL_COMPAT_H
82455+#define GR_ACL_COMPAT_H
82456+
82457+#include <linux/resource.h>
82458+#include <asm/resource.h>
82459+
82460+struct sprole_pw_compat {
82461+ compat_uptr_t rolename;
82462+ unsigned char salt[GR_SALT_LEN];
82463+ unsigned char sum[GR_SHA_LEN];
82464+};
82465+
82466+struct gr_hash_struct_compat {
82467+ compat_uptr_t table;
82468+ compat_uptr_t nametable;
82469+ compat_uptr_t first;
82470+ __u32 table_size;
82471+ __u32 used_size;
82472+ int type;
82473+};
82474+
82475+struct acl_subject_label_compat {
82476+ compat_uptr_t filename;
82477+ compat_ino_t inode;
82478+ __u32 device;
82479+ __u32 mode;
82480+ kernel_cap_t cap_mask;
82481+ kernel_cap_t cap_lower;
82482+ kernel_cap_t cap_invert_audit;
82483+
82484+ struct compat_rlimit res[GR_NLIMITS];
82485+ __u32 resmask;
82486+
82487+ __u8 user_trans_type;
82488+ __u8 group_trans_type;
82489+ compat_uptr_t user_transitions;
82490+ compat_uptr_t group_transitions;
82491+ __u16 user_trans_num;
82492+ __u16 group_trans_num;
82493+
82494+ __u32 sock_families[2];
82495+ __u32 ip_proto[8];
82496+ __u32 ip_type;
82497+ compat_uptr_t ips;
82498+ __u32 ip_num;
82499+ __u32 inaddr_any_override;
82500+
82501+ __u32 crashes;
82502+ compat_ulong_t expires;
82503+
82504+ compat_uptr_t parent_subject;
82505+ compat_uptr_t hash;
82506+ compat_uptr_t prev;
82507+ compat_uptr_t next;
82508+
82509+ compat_uptr_t obj_hash;
82510+ __u32 obj_hash_size;
82511+ __u16 pax_flags;
82512+};
82513+
82514+struct role_allowed_ip_compat {
82515+ __u32 addr;
82516+ __u32 netmask;
82517+
82518+ compat_uptr_t prev;
82519+ compat_uptr_t next;
82520+};
82521+
82522+struct role_transition_compat {
82523+ compat_uptr_t rolename;
82524+
82525+ compat_uptr_t prev;
82526+ compat_uptr_t next;
82527+};
82528+
82529+struct acl_role_label_compat {
82530+ compat_uptr_t rolename;
82531+ uid_t uidgid;
82532+ __u16 roletype;
82533+
82534+ __u16 auth_attempts;
82535+ compat_ulong_t expires;
82536+
82537+ compat_uptr_t root_label;
82538+ compat_uptr_t hash;
82539+
82540+ compat_uptr_t prev;
82541+ compat_uptr_t next;
82542+
82543+ compat_uptr_t transitions;
82544+ compat_uptr_t allowed_ips;
82545+ compat_uptr_t domain_children;
82546+ __u16 domain_child_num;
82547+
82548+ umode_t umask;
82549+
82550+ compat_uptr_t subj_hash;
82551+ __u32 subj_hash_size;
82552+};
82553+
82554+struct user_acl_role_db_compat {
82555+ compat_uptr_t r_table;
82556+ __u32 num_pointers;
82557+ __u32 num_roles;
82558+ __u32 num_domain_children;
82559+ __u32 num_subjects;
82560+ __u32 num_objects;
82561+};
82562+
82563+struct acl_object_label_compat {
82564+ compat_uptr_t filename;
82565+ compat_ino_t inode;
82566+ __u32 device;
82567+ __u32 mode;
82568+
82569+ compat_uptr_t nested;
82570+ compat_uptr_t globbed;
82571+
82572+ compat_uptr_t prev;
82573+ compat_uptr_t next;
82574+};
82575+
82576+struct acl_ip_label_compat {
82577+ compat_uptr_t iface;
82578+ __u32 addr;
82579+ __u32 netmask;
82580+ __u16 low, high;
82581+ __u8 mode;
82582+ __u32 type;
82583+ __u32 proto[8];
82584+
82585+ compat_uptr_t prev;
82586+ compat_uptr_t next;
82587+};
82588+
82589+struct gr_arg_compat {
82590+ struct user_acl_role_db_compat role_db;
82591+ unsigned char pw[GR_PW_LEN];
82592+ unsigned char salt[GR_SALT_LEN];
82593+ unsigned char sum[GR_SHA_LEN];
82594+ unsigned char sp_role[GR_SPROLE_LEN];
82595+ compat_uptr_t sprole_pws;
82596+ __u32 segv_device;
82597+ compat_ino_t segv_inode;
82598+ uid_t segv_uid;
82599+ __u16 num_sprole_pws;
82600+ __u16 mode;
82601+};
82602+
82603+struct gr_arg_wrapper_compat {
82604+ compat_uptr_t arg;
82605+ __u32 version;
82606+ __u32 size;
82607+};
82608+
82609+#endif
82610diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
82611new file mode 100644
82612index 0000000..323ecf2
82613--- /dev/null
82614+++ b/include/linux/gralloc.h
82615@@ -0,0 +1,9 @@
82616+#ifndef __GRALLOC_H
82617+#define __GRALLOC_H
82618+
82619+void acl_free_all(void);
82620+int acl_alloc_stack_init(unsigned long size);
82621+void *acl_alloc(unsigned long len);
82622+void *acl_alloc_num(unsigned long num, unsigned long len);
82623+
82624+#endif
82625diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
82626new file mode 100644
82627index 0000000..be66033
82628--- /dev/null
82629+++ b/include/linux/grdefs.h
82630@@ -0,0 +1,140 @@
82631+#ifndef GRDEFS_H
82632+#define GRDEFS_H
82633+
82634+/* Begin grsecurity status declarations */
82635+
82636+enum {
82637+ GR_READY = 0x01,
82638+ GR_STATUS_INIT = 0x00 // disabled state
82639+};
82640+
82641+/* Begin ACL declarations */
82642+
82643+/* Role flags */
82644+
82645+enum {
82646+ GR_ROLE_USER = 0x0001,
82647+ GR_ROLE_GROUP = 0x0002,
82648+ GR_ROLE_DEFAULT = 0x0004,
82649+ GR_ROLE_SPECIAL = 0x0008,
82650+ GR_ROLE_AUTH = 0x0010,
82651+ GR_ROLE_NOPW = 0x0020,
82652+ GR_ROLE_GOD = 0x0040,
82653+ GR_ROLE_LEARN = 0x0080,
82654+ GR_ROLE_TPE = 0x0100,
82655+ GR_ROLE_DOMAIN = 0x0200,
82656+ GR_ROLE_PAM = 0x0400,
82657+ GR_ROLE_PERSIST = 0x0800
82658+};
82659+
82660+/* ACL Subject and Object mode flags */
82661+enum {
82662+ GR_DELETED = 0x80000000
82663+};
82664+
82665+/* ACL Object-only mode flags */
82666+enum {
82667+ GR_READ = 0x00000001,
82668+ GR_APPEND = 0x00000002,
82669+ GR_WRITE = 0x00000004,
82670+ GR_EXEC = 0x00000008,
82671+ GR_FIND = 0x00000010,
82672+ GR_INHERIT = 0x00000020,
82673+ GR_SETID = 0x00000040,
82674+ GR_CREATE = 0x00000080,
82675+ GR_DELETE = 0x00000100,
82676+ GR_LINK = 0x00000200,
82677+ GR_AUDIT_READ = 0x00000400,
82678+ GR_AUDIT_APPEND = 0x00000800,
82679+ GR_AUDIT_WRITE = 0x00001000,
82680+ GR_AUDIT_EXEC = 0x00002000,
82681+ GR_AUDIT_FIND = 0x00004000,
82682+ GR_AUDIT_INHERIT= 0x00008000,
82683+ GR_AUDIT_SETID = 0x00010000,
82684+ GR_AUDIT_CREATE = 0x00020000,
82685+ GR_AUDIT_DELETE = 0x00040000,
82686+ GR_AUDIT_LINK = 0x00080000,
82687+ GR_PTRACERD = 0x00100000,
82688+ GR_NOPTRACE = 0x00200000,
82689+ GR_SUPPRESS = 0x00400000,
82690+ GR_NOLEARN = 0x00800000,
82691+ GR_INIT_TRANSFER= 0x01000000
82692+};
82693+
82694+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
82695+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
82696+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
82697+
82698+/* ACL subject-only mode flags */
82699+enum {
82700+ GR_KILL = 0x00000001,
82701+ GR_VIEW = 0x00000002,
82702+ GR_PROTECTED = 0x00000004,
82703+ GR_LEARN = 0x00000008,
82704+ GR_OVERRIDE = 0x00000010,
82705+ /* just a placeholder, this mode is only used in userspace */
82706+ GR_DUMMY = 0x00000020,
82707+ GR_PROTSHM = 0x00000040,
82708+ GR_KILLPROC = 0x00000080,
82709+ GR_KILLIPPROC = 0x00000100,
82710+ /* just a placeholder, this mode is only used in userspace */
82711+ GR_NOTROJAN = 0x00000200,
82712+ GR_PROTPROCFD = 0x00000400,
82713+ GR_PROCACCT = 0x00000800,
82714+ GR_RELAXPTRACE = 0x00001000,
82715+ //GR_NESTED = 0x00002000,
82716+ GR_INHERITLEARN = 0x00004000,
82717+ GR_PROCFIND = 0x00008000,
82718+ GR_POVERRIDE = 0x00010000,
82719+ GR_KERNELAUTH = 0x00020000,
82720+ GR_ATSECURE = 0x00040000,
82721+ GR_SHMEXEC = 0x00080000
82722+};
82723+
82724+enum {
82725+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82726+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82727+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82728+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82729+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82730+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82731+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82732+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82733+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82734+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82735+};
82736+
82737+enum {
82738+ GR_ID_USER = 0x01,
82739+ GR_ID_GROUP = 0x02,
82740+};
82741+
82742+enum {
82743+ GR_ID_ALLOW = 0x01,
82744+ GR_ID_DENY = 0x02,
82745+};
82746+
82747+#define GR_CRASH_RES 31
82748+#define GR_UIDTABLE_MAX 500
82749+
82750+/* begin resource learning section */
82751+enum {
82752+ GR_RLIM_CPU_BUMP = 60,
82753+ GR_RLIM_FSIZE_BUMP = 50000,
82754+ GR_RLIM_DATA_BUMP = 10000,
82755+ GR_RLIM_STACK_BUMP = 1000,
82756+ GR_RLIM_CORE_BUMP = 10000,
82757+ GR_RLIM_RSS_BUMP = 500000,
82758+ GR_RLIM_NPROC_BUMP = 1,
82759+ GR_RLIM_NOFILE_BUMP = 5,
82760+ GR_RLIM_MEMLOCK_BUMP = 50000,
82761+ GR_RLIM_AS_BUMP = 500000,
82762+ GR_RLIM_LOCKS_BUMP = 2,
82763+ GR_RLIM_SIGPENDING_BUMP = 5,
82764+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82765+ GR_RLIM_NICE_BUMP = 1,
82766+ GR_RLIM_RTPRIO_BUMP = 1,
82767+ GR_RLIM_RTTIME_BUMP = 1000000
82768+};
82769+
82770+#endif
82771diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82772new file mode 100644
82773index 0000000..d25522e
82774--- /dev/null
82775+++ b/include/linux/grinternal.h
82776@@ -0,0 +1,229 @@
82777+#ifndef __GRINTERNAL_H
82778+#define __GRINTERNAL_H
82779+
82780+#ifdef CONFIG_GRKERNSEC
82781+
82782+#include <linux/fs.h>
82783+#include <linux/mnt_namespace.h>
82784+#include <linux/nsproxy.h>
82785+#include <linux/gracl.h>
82786+#include <linux/grdefs.h>
82787+#include <linux/grmsg.h>
82788+
82789+void gr_add_learn_entry(const char *fmt, ...)
82790+ __attribute__ ((format (printf, 1, 2)));
82791+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82792+ const struct vfsmount *mnt);
82793+__u32 gr_check_create(const struct dentry *new_dentry,
82794+ const struct dentry *parent,
82795+ const struct vfsmount *mnt, const __u32 mode);
82796+int gr_check_protected_task(const struct task_struct *task);
82797+__u32 to_gr_audit(const __u32 reqmode);
82798+int gr_set_acls(const int type);
82799+int gr_acl_is_enabled(void);
82800+char gr_roletype_to_char(void);
82801+
82802+void gr_handle_alertkill(struct task_struct *task);
82803+char *gr_to_filename(const struct dentry *dentry,
82804+ const struct vfsmount *mnt);
82805+char *gr_to_filename1(const struct dentry *dentry,
82806+ const struct vfsmount *mnt);
82807+char *gr_to_filename2(const struct dentry *dentry,
82808+ const struct vfsmount *mnt);
82809+char *gr_to_filename3(const struct dentry *dentry,
82810+ const struct vfsmount *mnt);
82811+
82812+extern int grsec_enable_ptrace_readexec;
82813+extern int grsec_enable_harden_ptrace;
82814+extern int grsec_enable_link;
82815+extern int grsec_enable_fifo;
82816+extern int grsec_enable_execve;
82817+extern int grsec_enable_shm;
82818+extern int grsec_enable_execlog;
82819+extern int grsec_enable_signal;
82820+extern int grsec_enable_audit_ptrace;
82821+extern int grsec_enable_forkfail;
82822+extern int grsec_enable_time;
82823+extern int grsec_enable_rofs;
82824+extern int grsec_deny_new_usb;
82825+extern int grsec_enable_chroot_shmat;
82826+extern int grsec_enable_chroot_mount;
82827+extern int grsec_enable_chroot_double;
82828+extern int grsec_enable_chroot_pivot;
82829+extern int grsec_enable_chroot_chdir;
82830+extern int grsec_enable_chroot_chmod;
82831+extern int grsec_enable_chroot_mknod;
82832+extern int grsec_enable_chroot_fchdir;
82833+extern int grsec_enable_chroot_nice;
82834+extern int grsec_enable_chroot_execlog;
82835+extern int grsec_enable_chroot_caps;
82836+extern int grsec_enable_chroot_sysctl;
82837+extern int grsec_enable_chroot_unix;
82838+extern int grsec_enable_symlinkown;
82839+extern kgid_t grsec_symlinkown_gid;
82840+extern int grsec_enable_tpe;
82841+extern kgid_t grsec_tpe_gid;
82842+extern int grsec_enable_tpe_all;
82843+extern int grsec_enable_tpe_invert;
82844+extern int grsec_enable_socket_all;
82845+extern kgid_t grsec_socket_all_gid;
82846+extern int grsec_enable_socket_client;
82847+extern kgid_t grsec_socket_client_gid;
82848+extern int grsec_enable_socket_server;
82849+extern kgid_t grsec_socket_server_gid;
82850+extern kgid_t grsec_audit_gid;
82851+extern int grsec_enable_group;
82852+extern int grsec_enable_log_rwxmaps;
82853+extern int grsec_enable_mount;
82854+extern int grsec_enable_chdir;
82855+extern int grsec_resource_logging;
82856+extern int grsec_enable_blackhole;
82857+extern int grsec_lastack_retries;
82858+extern int grsec_enable_brute;
82859+extern int grsec_enable_harden_ipc;
82860+extern int grsec_lock;
82861+
82862+extern spinlock_t grsec_alert_lock;
82863+extern unsigned long grsec_alert_wtime;
82864+extern unsigned long grsec_alert_fyet;
82865+
82866+extern spinlock_t grsec_audit_lock;
82867+
82868+extern rwlock_t grsec_exec_file_lock;
82869+
82870+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82871+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82872+ (tsk)->exec_file->f_path.mnt) : "/")
82873+
82874+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82875+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82876+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82877+
82878+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82879+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82880+ (tsk)->exec_file->f_path.mnt) : "/")
82881+
82882+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82883+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82884+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82885+
82886+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82887+
82888+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82889+
82890+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82891+{
82892+ if (file1 && file2) {
82893+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82894+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82895+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82896+ return true;
82897+ }
82898+
82899+ return false;
82900+}
82901+
82902+#define GR_CHROOT_CAPS {{ \
82903+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82904+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82905+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82906+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82907+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82908+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82909+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82910+
82911+#define security_learn(normal_msg,args...) \
82912+({ \
82913+ read_lock(&grsec_exec_file_lock); \
82914+ gr_add_learn_entry(normal_msg "\n", ## args); \
82915+ read_unlock(&grsec_exec_file_lock); \
82916+})
82917+
82918+enum {
82919+ GR_DO_AUDIT,
82920+ GR_DONT_AUDIT,
82921+ /* used for non-audit messages that we shouldn't kill the task on */
82922+ GR_DONT_AUDIT_GOOD
82923+};
82924+
82925+enum {
82926+ GR_TTYSNIFF,
82927+ GR_RBAC,
82928+ GR_RBAC_STR,
82929+ GR_STR_RBAC,
82930+ GR_RBAC_MODE2,
82931+ GR_RBAC_MODE3,
82932+ GR_FILENAME,
82933+ GR_SYSCTL_HIDDEN,
82934+ GR_NOARGS,
82935+ GR_ONE_INT,
82936+ GR_ONE_INT_TWO_STR,
82937+ GR_ONE_STR,
82938+ GR_STR_INT,
82939+ GR_TWO_STR_INT,
82940+ GR_TWO_INT,
82941+ GR_TWO_U64,
82942+ GR_THREE_INT,
82943+ GR_FIVE_INT_TWO_STR,
82944+ GR_TWO_STR,
82945+ GR_THREE_STR,
82946+ GR_FOUR_STR,
82947+ GR_STR_FILENAME,
82948+ GR_FILENAME_STR,
82949+ GR_FILENAME_TWO_INT,
82950+ GR_FILENAME_TWO_INT_STR,
82951+ GR_TEXTREL,
82952+ GR_PTRACE,
82953+ GR_RESOURCE,
82954+ GR_CAP,
82955+ GR_SIG,
82956+ GR_SIG2,
82957+ GR_CRASH1,
82958+ GR_CRASH2,
82959+ GR_PSACCT,
82960+ GR_RWXMAP,
82961+ GR_RWXMAPVMA
82962+};
82963+
82964+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82965+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82966+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82967+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82968+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82969+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82970+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82971+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82972+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82973+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82974+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82975+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82976+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82977+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82978+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82979+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82980+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82981+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82982+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82983+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82984+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82985+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82986+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82987+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82988+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82989+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82990+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82991+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82992+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82993+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82994+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82995+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82996+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82997+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82998+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82999+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
83000+
83001+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
83002+
83003+#endif
83004+
83005+#endif
83006diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
83007new file mode 100644
83008index 0000000..b02ba9d
83009--- /dev/null
83010+++ b/include/linux/grmsg.h
83011@@ -0,0 +1,117 @@
83012+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
83013+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
83014+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
83015+#define GR_STOPMOD_MSG "denied modification of module state by "
83016+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
83017+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
83018+#define GR_IOPERM_MSG "denied use of ioperm() by "
83019+#define GR_IOPL_MSG "denied use of iopl() by "
83020+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
83021+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
83022+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
83023+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
83024+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
83025+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
83026+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
83027+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
83028+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
83029+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
83030+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
83031+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
83032+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
83033+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
83034+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
83035+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
83036+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
83037+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
83038+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
83039+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
83040+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
83041+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
83042+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
83043+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
83044+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
83045+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
83046+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
83047+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
83048+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
83049+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
83050+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
83051+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
83052+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
83053+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
83054+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
83055+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
83056+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
83057+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
83058+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
83059+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
83060+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
83061+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
83062+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
83063+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
83064+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
83065+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
83066+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
83067+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
83068+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
83069+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
83070+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
83071+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
83072+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
83073+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
83074+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
83075+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
83076+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
83077+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
83078+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
83079+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
83080+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
83081+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
83082+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
83083+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
83084+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
83085+#define GR_FAILFORK_MSG "failed fork with errno %s by "
83086+#define GR_NICE_CHROOT_MSG "denied priority change by "
83087+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
83088+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
83089+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
83090+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
83091+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
83092+#define GR_TIME_MSG "time set by "
83093+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
83094+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
83095+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
83096+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
83097+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
83098+#define GR_BIND_MSG "denied bind() by "
83099+#define GR_CONNECT_MSG "denied connect() by "
83100+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
83101+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
83102+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
83103+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
83104+#define GR_CAP_ACL_MSG "use of %s denied for "
83105+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
83106+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
83107+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
83108+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
83109+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
83110+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
83111+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
83112+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
83113+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
83114+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
83115+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
83116+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
83117+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
83118+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
83119+#define GR_VM86_MSG "denied use of vm86 by "
83120+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
83121+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
83122+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
83123+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
83124+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
83125+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
83126+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
83127+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
83128+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
83129diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
83130new file mode 100644
83131index 0000000..10b9635
83132--- /dev/null
83133+++ b/include/linux/grsecurity.h
83134@@ -0,0 +1,254 @@
83135+#ifndef GR_SECURITY_H
83136+#define GR_SECURITY_H
83137+#include <linux/fs.h>
83138+#include <linux/fs_struct.h>
83139+#include <linux/binfmts.h>
83140+#include <linux/gracl.h>
83141+
83142+/* notify of brain-dead configs */
83143+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83144+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
83145+#endif
83146+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
83147+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
83148+#endif
83149+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
83150+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
83151+#endif
83152+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
83153+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
83154+#endif
83155+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
83156+#error "CONFIG_PAX enabled, but no PaX options are enabled."
83157+#endif
83158+
83159+int gr_handle_new_usb(void);
83160+
83161+void gr_handle_brute_attach(int dumpable);
83162+void gr_handle_brute_check(void);
83163+void gr_handle_kernel_exploit(void);
83164+
83165+char gr_roletype_to_char(void);
83166+
83167+int gr_proc_is_restricted(void);
83168+
83169+int gr_acl_enable_at_secure(void);
83170+
83171+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
83172+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
83173+
83174+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
83175+
83176+void gr_del_task_from_ip_table(struct task_struct *p);
83177+
83178+int gr_pid_is_chrooted(struct task_struct *p);
83179+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
83180+int gr_handle_chroot_nice(void);
83181+int gr_handle_chroot_sysctl(const int op);
83182+int gr_handle_chroot_setpriority(struct task_struct *p,
83183+ const int niceval);
83184+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
83185+int gr_chroot_fhandle(void);
83186+int gr_handle_chroot_chroot(const struct dentry *dentry,
83187+ const struct vfsmount *mnt);
83188+void gr_handle_chroot_chdir(const struct path *path);
83189+int gr_handle_chroot_chmod(const struct dentry *dentry,
83190+ const struct vfsmount *mnt, const int mode);
83191+int gr_handle_chroot_mknod(const struct dentry *dentry,
83192+ const struct vfsmount *mnt, const int mode);
83193+int gr_handle_chroot_mount(const struct dentry *dentry,
83194+ const struct vfsmount *mnt,
83195+ const char *dev_name);
83196+int gr_handle_chroot_pivot(void);
83197+int gr_handle_chroot_unix(const pid_t pid);
83198+
83199+int gr_handle_rawio(const struct inode *inode);
83200+
83201+void gr_handle_ioperm(void);
83202+void gr_handle_iopl(void);
83203+void gr_handle_msr_write(void);
83204+
83205+umode_t gr_acl_umask(void);
83206+
83207+int gr_tpe_allow(const struct file *file);
83208+
83209+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
83210+void gr_clear_chroot_entries(struct task_struct *task);
83211+
83212+void gr_log_forkfail(const int retval);
83213+void gr_log_timechange(void);
83214+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
83215+void gr_log_chdir(const struct dentry *dentry,
83216+ const struct vfsmount *mnt);
83217+void gr_log_chroot_exec(const struct dentry *dentry,
83218+ const struct vfsmount *mnt);
83219+void gr_log_remount(const char *devname, const int retval);
83220+void gr_log_unmount(const char *devname, const int retval);
83221+void gr_log_mount(const char *from, const char *to, const int retval);
83222+void gr_log_textrel(struct vm_area_struct *vma);
83223+void gr_log_ptgnustack(struct file *file);
83224+void gr_log_rwxmmap(struct file *file);
83225+void gr_log_rwxmprotect(struct vm_area_struct *vma);
83226+
83227+int gr_handle_follow_link(const struct inode *parent,
83228+ const struct inode *inode,
83229+ const struct dentry *dentry,
83230+ const struct vfsmount *mnt);
83231+int gr_handle_fifo(const struct dentry *dentry,
83232+ const struct vfsmount *mnt,
83233+ const struct dentry *dir, const int flag,
83234+ const int acc_mode);
83235+int gr_handle_hardlink(const struct dentry *dentry,
83236+ const struct vfsmount *mnt,
83237+ struct inode *inode,
83238+ const int mode, const struct filename *to);
83239+
83240+int gr_is_capable(const int cap);
83241+int gr_is_capable_nolog(const int cap);
83242+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
83243+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
83244+
83245+void gr_copy_label(struct task_struct *tsk);
83246+void gr_handle_crash(struct task_struct *task, const int sig);
83247+int gr_handle_signal(const struct task_struct *p, const int sig);
83248+int gr_check_crash_uid(const kuid_t uid);
83249+int gr_check_protected_task(const struct task_struct *task);
83250+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
83251+int gr_acl_handle_mmap(const struct file *file,
83252+ const unsigned long prot);
83253+int gr_acl_handle_mprotect(const struct file *file,
83254+ const unsigned long prot);
83255+int gr_check_hidden_task(const struct task_struct *tsk);
83256+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
83257+ const struct vfsmount *mnt);
83258+__u32 gr_acl_handle_utime(const struct dentry *dentry,
83259+ const struct vfsmount *mnt);
83260+__u32 gr_acl_handle_access(const struct dentry *dentry,
83261+ const struct vfsmount *mnt, const int fmode);
83262+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
83263+ const struct vfsmount *mnt, umode_t *mode);
83264+__u32 gr_acl_handle_chown(const struct dentry *dentry,
83265+ const struct vfsmount *mnt);
83266+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
83267+ const struct vfsmount *mnt);
83268+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
83269+ const struct vfsmount *mnt);
83270+int gr_handle_ptrace(struct task_struct *task, const long request);
83271+int gr_handle_proc_ptrace(struct task_struct *task);
83272+__u32 gr_acl_handle_execve(const struct dentry *dentry,
83273+ const struct vfsmount *mnt);
83274+int gr_check_crash_exec(const struct file *filp);
83275+int gr_acl_is_enabled(void);
83276+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
83277+ const kgid_t gid);
83278+int gr_set_proc_label(const struct dentry *dentry,
83279+ const struct vfsmount *mnt,
83280+ const int unsafe_flags);
83281+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
83282+ const struct vfsmount *mnt);
83283+__u32 gr_acl_handle_open(const struct dentry *dentry,
83284+ const struct vfsmount *mnt, int acc_mode);
83285+__u32 gr_acl_handle_creat(const struct dentry *dentry,
83286+ const struct dentry *p_dentry,
83287+ const struct vfsmount *p_mnt,
83288+ int open_flags, int acc_mode, const int imode);
83289+void gr_handle_create(const struct dentry *dentry,
83290+ const struct vfsmount *mnt);
83291+void gr_handle_proc_create(const struct dentry *dentry,
83292+ const struct inode *inode);
83293+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
83294+ const struct dentry *parent_dentry,
83295+ const struct vfsmount *parent_mnt,
83296+ const int mode);
83297+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
83298+ const struct dentry *parent_dentry,
83299+ const struct vfsmount *parent_mnt);
83300+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
83301+ const struct vfsmount *mnt);
83302+void gr_handle_delete(const ino_t ino, const dev_t dev);
83303+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
83304+ const struct vfsmount *mnt);
83305+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
83306+ const struct dentry *parent_dentry,
83307+ const struct vfsmount *parent_mnt,
83308+ const struct filename *from);
83309+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
83310+ const struct dentry *parent_dentry,
83311+ const struct vfsmount *parent_mnt,
83312+ const struct dentry *old_dentry,
83313+ const struct vfsmount *old_mnt, const struct filename *to);
83314+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
83315+int gr_acl_handle_rename(struct dentry *new_dentry,
83316+ struct dentry *parent_dentry,
83317+ const struct vfsmount *parent_mnt,
83318+ struct dentry *old_dentry,
83319+ struct inode *old_parent_inode,
83320+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
83321+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
83322+ struct dentry *old_dentry,
83323+ struct dentry *new_dentry,
83324+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
83325+__u32 gr_check_link(const struct dentry *new_dentry,
83326+ const struct dentry *parent_dentry,
83327+ const struct vfsmount *parent_mnt,
83328+ const struct dentry *old_dentry,
83329+ const struct vfsmount *old_mnt);
83330+int gr_acl_handle_filldir(const struct file *file, const char *name,
83331+ const unsigned int namelen, const ino_t ino);
83332+
83333+__u32 gr_acl_handle_unix(const struct dentry *dentry,
83334+ const struct vfsmount *mnt);
83335+void gr_acl_handle_exit(void);
83336+void gr_acl_handle_psacct(struct task_struct *task, const long code);
83337+int gr_acl_handle_procpidmem(const struct task_struct *task);
83338+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
83339+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
83340+void gr_audit_ptrace(struct task_struct *task);
83341+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
83342+void gr_put_exec_file(struct task_struct *task);
83343+
83344+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
83345+
83346+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83347+extern void gr_learn_resource(const struct task_struct *task, const int res,
83348+ const unsigned long wanted, const int gt);
83349+#else
83350+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83351+ const unsigned long wanted, const int gt)
83352+{
83353+}
83354+#endif
83355+
83356+#ifdef CONFIG_GRKERNSEC_RESLOG
83357+extern void gr_log_resource(const struct task_struct *task, const int res,
83358+ const unsigned long wanted, const int gt);
83359+#else
83360+static inline void gr_log_resource(const struct task_struct *task, const int res,
83361+ const unsigned long wanted, const int gt)
83362+{
83363+}
83364+#endif
83365+
83366+#ifdef CONFIG_GRKERNSEC
83367+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
83368+void gr_handle_vm86(void);
83369+void gr_handle_mem_readwrite(u64 from, u64 to);
83370+
83371+void gr_log_badprocpid(const char *entry);
83372+
83373+extern int grsec_enable_dmesg;
83374+extern int grsec_disable_privio;
83375+
83376+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83377+extern kgid_t grsec_proc_gid;
83378+#endif
83379+
83380+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83381+extern int grsec_enable_chroot_findtask;
83382+#endif
83383+#ifdef CONFIG_GRKERNSEC_SETXID
83384+extern int grsec_enable_setxid;
83385+#endif
83386+#endif
83387+
83388+#endif
83389diff --git a/include/linux/grsock.h b/include/linux/grsock.h
83390new file mode 100644
83391index 0000000..e7ffaaf
83392--- /dev/null
83393+++ b/include/linux/grsock.h
83394@@ -0,0 +1,19 @@
83395+#ifndef __GRSOCK_H
83396+#define __GRSOCK_H
83397+
83398+extern void gr_attach_curr_ip(const struct sock *sk);
83399+extern int gr_handle_sock_all(const int family, const int type,
83400+ const int protocol);
83401+extern int gr_handle_sock_server(const struct sockaddr *sck);
83402+extern int gr_handle_sock_server_other(const struct sock *sck);
83403+extern int gr_handle_sock_client(const struct sockaddr *sck);
83404+extern int gr_search_connect(struct socket * sock,
83405+ struct sockaddr_in * addr);
83406+extern int gr_search_bind(struct socket * sock,
83407+ struct sockaddr_in * addr);
83408+extern int gr_search_listen(struct socket * sock);
83409+extern int gr_search_accept(struct socket * sock);
83410+extern int gr_search_socket(const int domain, const int type,
83411+ const int protocol);
83412+
83413+#endif
83414diff --git a/include/linux/hash.h b/include/linux/hash.h
83415index d0494c3..69b7715 100644
83416--- a/include/linux/hash.h
83417+++ b/include/linux/hash.h
83418@@ -87,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr)
83419 struct fast_hash_ops {
83420 u32 (*hash)(const void *data, u32 len, u32 seed);
83421 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
83422-};
83423+} __no_const;
83424
83425 /**
83426 * arch_fast_hash - Caclulates a hash over a given buffer that can have
83427diff --git a/include/linux/highmem.h b/include/linux/highmem.h
83428index 9286a46..373f27f 100644
83429--- a/include/linux/highmem.h
83430+++ b/include/linux/highmem.h
83431@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
83432 kunmap_atomic(kaddr);
83433 }
83434
83435+static inline void sanitize_highpage(struct page *page)
83436+{
83437+ void *kaddr;
83438+ unsigned long flags;
83439+
83440+ local_irq_save(flags);
83441+ kaddr = kmap_atomic(page);
83442+ clear_page(kaddr);
83443+ kunmap_atomic(kaddr);
83444+ local_irq_restore(flags);
83445+}
83446+
83447 static inline void zero_user_segments(struct page *page,
83448 unsigned start1, unsigned end1,
83449 unsigned start2, unsigned end2)
83450diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
83451index 1c7b89a..7dda400 100644
83452--- a/include/linux/hwmon-sysfs.h
83453+++ b/include/linux/hwmon-sysfs.h
83454@@ -25,7 +25,8 @@
83455 struct sensor_device_attribute{
83456 struct device_attribute dev_attr;
83457 int index;
83458-};
83459+} __do_const;
83460+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
83461 #define to_sensor_dev_attr(_dev_attr) \
83462 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
83463
83464@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
83465 struct device_attribute dev_attr;
83466 u8 index;
83467 u8 nr;
83468-};
83469+} __do_const;
83470+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
83471 #define to_sensor_dev_attr_2(_dev_attr) \
83472 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
83473
83474diff --git a/include/linux/i2c.h b/include/linux/i2c.h
83475index b556e0a..c10a515 100644
83476--- a/include/linux/i2c.h
83477+++ b/include/linux/i2c.h
83478@@ -378,6 +378,7 @@ struct i2c_algorithm {
83479 /* To determine what the adapter supports */
83480 u32 (*functionality) (struct i2c_adapter *);
83481 };
83482+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
83483
83484 /**
83485 * struct i2c_bus_recovery_info - I2C bus recovery information
83486diff --git a/include/linux/i2o.h b/include/linux/i2o.h
83487index d23c3c2..eb63c81 100644
83488--- a/include/linux/i2o.h
83489+++ b/include/linux/i2o.h
83490@@ -565,7 +565,7 @@ struct i2o_controller {
83491 struct i2o_device *exec; /* Executive */
83492 #if BITS_PER_LONG == 64
83493 spinlock_t context_list_lock; /* lock for context_list */
83494- atomic_t context_list_counter; /* needed for unique contexts */
83495+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
83496 struct list_head context_list; /* list of context id's
83497 and pointers */
83498 #endif
83499diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
83500index aff7ad8..3942bbd 100644
83501--- a/include/linux/if_pppox.h
83502+++ b/include/linux/if_pppox.h
83503@@ -76,7 +76,7 @@ struct pppox_proto {
83504 int (*ioctl)(struct socket *sock, unsigned int cmd,
83505 unsigned long arg);
83506 struct module *owner;
83507-};
83508+} __do_const;
83509
83510 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
83511 extern void unregister_pppox_proto(int proto_num);
83512diff --git a/include/linux/init.h b/include/linux/init.h
83513index 2df8e8d..3e1280d 100644
83514--- a/include/linux/init.h
83515+++ b/include/linux/init.h
83516@@ -37,9 +37,17 @@
83517 * section.
83518 */
83519
83520+#define add_init_latent_entropy __latent_entropy
83521+
83522+#ifdef CONFIG_MEMORY_HOTPLUG
83523+#define add_meminit_latent_entropy
83524+#else
83525+#define add_meminit_latent_entropy __latent_entropy
83526+#endif
83527+
83528 /* These are for everybody (although not all archs will actually
83529 discard it in modules) */
83530-#define __init __section(.init.text) __cold notrace
83531+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
83532 #define __initdata __section(.init.data)
83533 #define __initconst __constsection(.init.rodata)
83534 #define __exitdata __section(.exit.data)
83535@@ -100,7 +108,7 @@
83536 #define __cpuexitconst
83537
83538 /* Used for MEMORY_HOTPLUG */
83539-#define __meminit __section(.meminit.text) __cold notrace
83540+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
83541 #define __meminitdata __section(.meminit.data)
83542 #define __meminitconst __constsection(.meminit.rodata)
83543 #define __memexit __section(.memexit.text) __exitused __cold notrace
83544diff --git a/include/linux/init_task.h b/include/linux/init_task.h
83545index 2bb4c4f3..e0fac69 100644
83546--- a/include/linux/init_task.h
83547+++ b/include/linux/init_task.h
83548@@ -149,6 +149,12 @@ extern struct task_group root_task_group;
83549
83550 #define INIT_TASK_COMM "swapper"
83551
83552+#ifdef CONFIG_X86
83553+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
83554+#else
83555+#define INIT_TASK_THREAD_INFO
83556+#endif
83557+
83558 #ifdef CONFIG_RT_MUTEXES
83559 # define INIT_RT_MUTEXES(tsk) \
83560 .pi_waiters = RB_ROOT, \
83561@@ -196,6 +202,7 @@ extern struct task_group root_task_group;
83562 RCU_POINTER_INITIALIZER(cred, &init_cred), \
83563 .comm = INIT_TASK_COMM, \
83564 .thread = INIT_THREAD, \
83565+ INIT_TASK_THREAD_INFO \
83566 .fs = &init_fs, \
83567 .files = &init_files, \
83568 .signal = &init_signals, \
83569diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
83570index 698ad05..8601bb7 100644
83571--- a/include/linux/interrupt.h
83572+++ b/include/linux/interrupt.h
83573@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
83574
83575 struct softirq_action
83576 {
83577- void (*action)(struct softirq_action *);
83578-};
83579+ void (*action)(void);
83580+} __no_const;
83581
83582 asmlinkage void do_softirq(void);
83583 asmlinkage void __do_softirq(void);
83584@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
83585 }
83586 #endif
83587
83588-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
83589+extern void open_softirq(int nr, void (*action)(void));
83590 extern void softirq_init(void);
83591 extern void __raise_softirq_irqoff(unsigned int nr);
83592
83593diff --git a/include/linux/iommu.h b/include/linux/iommu.h
83594index 20f9a52..63ee2e3 100644
83595--- a/include/linux/iommu.h
83596+++ b/include/linux/iommu.h
83597@@ -131,7 +131,7 @@ struct iommu_ops {
83598 u32 (*domain_get_windows)(struct iommu_domain *domain);
83599
83600 unsigned long pgsize_bitmap;
83601-};
83602+} __do_const;
83603
83604 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
83605 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
83606diff --git a/include/linux/ioport.h b/include/linux/ioport.h
83607index 142ec54..873e033 100644
83608--- a/include/linux/ioport.h
83609+++ b/include/linux/ioport.h
83610@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
83611 int adjust_resource(struct resource *res, resource_size_t start,
83612 resource_size_t size);
83613 resource_size_t resource_alignment(struct resource *res);
83614-static inline resource_size_t resource_size(const struct resource *res)
83615+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
83616 {
83617 return res->end - res->start + 1;
83618 }
83619diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
83620index 35e7eca..6afb7ad 100644
83621--- a/include/linux/ipc_namespace.h
83622+++ b/include/linux/ipc_namespace.h
83623@@ -69,7 +69,7 @@ struct ipc_namespace {
83624 struct user_namespace *user_ns;
83625
83626 unsigned int proc_inum;
83627-};
83628+} __randomize_layout;
83629
83630 extern struct ipc_namespace init_ipc_ns;
83631 extern atomic_t nr_ipc_ns;
83632diff --git a/include/linux/irq.h b/include/linux/irq.h
83633index 62af592..cc3b0d0 100644
83634--- a/include/linux/irq.h
83635+++ b/include/linux/irq.h
83636@@ -344,7 +344,8 @@ struct irq_chip {
83637 void (*irq_release_resources)(struct irq_data *data);
83638
83639 unsigned long flags;
83640-};
83641+} __do_const;
83642+typedef struct irq_chip __no_const irq_chip_no_const;
83643
83644 /*
83645 * irq_chip specific flags
83646diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
83647index 45e2d8c..26d85da 100644
83648--- a/include/linux/irqchip/arm-gic.h
83649+++ b/include/linux/irqchip/arm-gic.h
83650@@ -75,9 +75,11 @@
83651
83652 #ifndef __ASSEMBLY__
83653
83654+#include <linux/irq.h>
83655+
83656 struct device_node;
83657
83658-extern struct irq_chip gic_arch_extn;
83659+extern irq_chip_no_const gic_arch_extn;
83660
83661 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
83662 u32 offset, struct device_node *);
83663diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
83664index c367cbd..c9b79e6 100644
83665--- a/include/linux/jiffies.h
83666+++ b/include/linux/jiffies.h
83667@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
83668 /*
83669 * Convert various time units to each other:
83670 */
83671-extern unsigned int jiffies_to_msecs(const unsigned long j);
83672-extern unsigned int jiffies_to_usecs(const unsigned long j);
83673+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
83674+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
83675
83676-static inline u64 jiffies_to_nsecs(const unsigned long j)
83677+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
83678 {
83679 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
83680 }
83681
83682-extern unsigned long msecs_to_jiffies(const unsigned int m);
83683-extern unsigned long usecs_to_jiffies(const unsigned int u);
83684+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
83685+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
83686 extern unsigned long timespec_to_jiffies(const struct timespec *value);
83687 extern void jiffies_to_timespec(const unsigned long jiffies,
83688- struct timespec *value);
83689-extern unsigned long timeval_to_jiffies(const struct timeval *value);
83690+ struct timespec *value) __intentional_overflow(-1);
83691+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
83692 extern void jiffies_to_timeval(const unsigned long jiffies,
83693 struct timeval *value);
83694
83695diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
83696index 6883e19..e854fcb 100644
83697--- a/include/linux/kallsyms.h
83698+++ b/include/linux/kallsyms.h
83699@@ -15,7 +15,8 @@
83700
83701 struct module;
83702
83703-#ifdef CONFIG_KALLSYMS
83704+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
83705+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83706 /* Lookup the address for a symbol. Returns 0 if not found. */
83707 unsigned long kallsyms_lookup_name(const char *name);
83708
83709@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
83710 /* Stupid that this does nothing, but I didn't create this mess. */
83711 #define __print_symbol(fmt, addr)
83712 #endif /*CONFIG_KALLSYMS*/
83713+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
83714+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
83715+extern unsigned long kallsyms_lookup_name(const char *name);
83716+extern void __print_symbol(const char *fmt, unsigned long address);
83717+extern int sprint_backtrace(char *buffer, unsigned long address);
83718+extern int sprint_symbol(char *buffer, unsigned long address);
83719+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
83720+const char *kallsyms_lookup(unsigned long addr,
83721+ unsigned long *symbolsize,
83722+ unsigned long *offset,
83723+ char **modname, char *namebuf);
83724+extern int kallsyms_lookup_size_offset(unsigned long addr,
83725+ unsigned long *symbolsize,
83726+ unsigned long *offset);
83727+#endif
83728
83729 /* This macro allows us to keep printk typechecking */
83730 static __printf(1, 2)
83731diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83732index 44792ee..6172f2a 100644
83733--- a/include/linux/key-type.h
83734+++ b/include/linux/key-type.h
83735@@ -132,7 +132,7 @@ struct key_type {
83736 /* internal fields */
83737 struct list_head link; /* link in types list */
83738 struct lock_class_key lock_class; /* key->sem lock class */
83739-};
83740+} __do_const;
83741
83742 extern struct key_type key_type_keyring;
83743
83744diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83745index 6b06d37..19f605f 100644
83746--- a/include/linux/kgdb.h
83747+++ b/include/linux/kgdb.h
83748@@ -52,7 +52,7 @@ extern int kgdb_connected;
83749 extern int kgdb_io_module_registered;
83750
83751 extern atomic_t kgdb_setting_breakpoint;
83752-extern atomic_t kgdb_cpu_doing_single_step;
83753+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83754
83755 extern struct task_struct *kgdb_usethread;
83756 extern struct task_struct *kgdb_contthread;
83757@@ -254,7 +254,7 @@ struct kgdb_arch {
83758 void (*correct_hw_break)(void);
83759
83760 void (*enable_nmi)(bool on);
83761-};
83762+} __do_const;
83763
83764 /**
83765 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83766@@ -279,11 +279,11 @@ struct kgdb_io {
83767 void (*pre_exception) (void);
83768 void (*post_exception) (void);
83769 int is_console;
83770-};
83771+} __do_const;
83772
83773 extern struct kgdb_arch arch_kgdb_ops;
83774
83775-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
83776+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
83777
83778 #ifdef CONFIG_SERIAL_KGDB_NMI
83779 extern int kgdb_register_nmi_console(void);
83780diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83781index 0555cc6..40116ce 100644
83782--- a/include/linux/kmod.h
83783+++ b/include/linux/kmod.h
83784@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83785 * usually useless though. */
83786 extern __printf(2, 3)
83787 int __request_module(bool wait, const char *name, ...);
83788+extern __printf(3, 4)
83789+int ___request_module(bool wait, char *param_name, const char *name, ...);
83790 #define request_module(mod...) __request_module(true, mod)
83791 #define request_module_nowait(mod...) __request_module(false, mod)
83792 #define try_then_request_module(x, mod...) \
83793@@ -57,6 +59,9 @@ struct subprocess_info {
83794 struct work_struct work;
83795 struct completion *complete;
83796 char *path;
83797+#ifdef CONFIG_GRKERNSEC
83798+ char *origpath;
83799+#endif
83800 char **argv;
83801 char **envp;
83802 int wait;
83803diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83804index 2d61b90..a1d0a13 100644
83805--- a/include/linux/kobject.h
83806+++ b/include/linux/kobject.h
83807@@ -118,7 +118,7 @@ struct kobj_type {
83808 struct attribute **default_attrs;
83809 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83810 const void *(*namespace)(struct kobject *kobj);
83811-};
83812+} __do_const;
83813
83814 struct kobj_uevent_env {
83815 char *argv[3];
83816@@ -142,6 +142,7 @@ struct kobj_attribute {
83817 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83818 const char *buf, size_t count);
83819 };
83820+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83821
83822 extern const struct sysfs_ops kobj_sysfs_ops;
83823
83824@@ -169,7 +170,7 @@ struct kset {
83825 spinlock_t list_lock;
83826 struct kobject kobj;
83827 const struct kset_uevent_ops *uevent_ops;
83828-};
83829+} __randomize_layout;
83830
83831 extern void kset_init(struct kset *kset);
83832 extern int __must_check kset_register(struct kset *kset);
83833diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83834index df32d25..fb52e27 100644
83835--- a/include/linux/kobject_ns.h
83836+++ b/include/linux/kobject_ns.h
83837@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83838 const void *(*netlink_ns)(struct sock *sk);
83839 const void *(*initial_ns)(void);
83840 void (*drop_ns)(void *);
83841-};
83842+} __do_const;
83843
83844 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83845 int kobj_ns_type_registered(enum kobj_ns_type type);
83846diff --git a/include/linux/kref.h b/include/linux/kref.h
83847index 484604d..0f6c5b6 100644
83848--- a/include/linux/kref.h
83849+++ b/include/linux/kref.h
83850@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83851 static inline int kref_sub(struct kref *kref, unsigned int count,
83852 void (*release)(struct kref *kref))
83853 {
83854- WARN_ON(release == NULL);
83855+ BUG_ON(release == NULL);
83856
83857 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83858 release(kref);
83859diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83860index a4c33b3..e854710 100644
83861--- a/include/linux/kvm_host.h
83862+++ b/include/linux/kvm_host.h
83863@@ -452,7 +452,7 @@ static inline void kvm_irqfd_exit(void)
83864 {
83865 }
83866 #endif
83867-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83868+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83869 struct module *module);
83870 void kvm_exit(void);
83871
83872@@ -618,7 +618,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83873 struct kvm_guest_debug *dbg);
83874 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83875
83876-int kvm_arch_init(void *opaque);
83877+int kvm_arch_init(const void *opaque);
83878 void kvm_arch_exit(void);
83879
83880 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83881diff --git a/include/linux/libata.h b/include/linux/libata.h
83882index 92abb49..e7fff2a 100644
83883--- a/include/linux/libata.h
83884+++ b/include/linux/libata.h
83885@@ -976,7 +976,7 @@ struct ata_port_operations {
83886 * fields must be pointers.
83887 */
83888 const struct ata_port_operations *inherits;
83889-};
83890+} __do_const;
83891
83892 struct ata_port_info {
83893 unsigned long flags;
83894diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83895index a6a42dd..6c5ebce 100644
83896--- a/include/linux/linkage.h
83897+++ b/include/linux/linkage.h
83898@@ -36,6 +36,7 @@
83899 #endif
83900
83901 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83902+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83903 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83904
83905 /*
83906diff --git a/include/linux/list.h b/include/linux/list.h
83907index cbbb96f..602d023 100644
83908--- a/include/linux/list.h
83909+++ b/include/linux/list.h
83910@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
83911 extern void list_del(struct list_head *entry);
83912 #endif
83913
83914+extern void __pax_list_add(struct list_head *new,
83915+ struct list_head *prev,
83916+ struct list_head *next);
83917+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83918+{
83919+ __pax_list_add(new, head, head->next);
83920+}
83921+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83922+{
83923+ __pax_list_add(new, head->prev, head);
83924+}
83925+extern void pax_list_del(struct list_head *entry);
83926+
83927 /**
83928 * list_replace - replace old entry by new one
83929 * @old : the element to be replaced
83930@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
83931 INIT_LIST_HEAD(entry);
83932 }
83933
83934+extern void pax_list_del_init(struct list_head *entry);
83935+
83936 /**
83937 * list_move - delete from one list and add as another's head
83938 * @list: the entry to move
83939diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83940index 4bfde0e..d6e2e09 100644
83941--- a/include/linux/lockref.h
83942+++ b/include/linux/lockref.h
83943@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83944 return ((int)l->count < 0);
83945 }
83946
83947+static inline unsigned int __lockref_read(struct lockref *lockref)
83948+{
83949+ return lockref->count;
83950+}
83951+
83952+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83953+{
83954+ lockref->count = count;
83955+}
83956+
83957+static inline void __lockref_inc(struct lockref *lockref)
83958+{
83959+
83960+#ifdef CONFIG_PAX_REFCOUNT
83961+ atomic_inc((atomic_t *)&lockref->count);
83962+#else
83963+ lockref->count++;
83964+#endif
83965+
83966+}
83967+
83968+static inline void __lockref_dec(struct lockref *lockref)
83969+{
83970+
83971+#ifdef CONFIG_PAX_REFCOUNT
83972+ atomic_dec((atomic_t *)&lockref->count);
83973+#else
83974+ lockref->count--;
83975+#endif
83976+
83977+}
83978+
83979 #endif /* __LINUX_LOCKREF_H */
83980diff --git a/include/linux/math64.h b/include/linux/math64.h
83981index c45c089..298841c 100644
83982--- a/include/linux/math64.h
83983+++ b/include/linux/math64.h
83984@@ -15,7 +15,7 @@
83985 * This is commonly provided by 32bit archs to provide an optimized 64bit
83986 * divide.
83987 */
83988-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83989+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83990 {
83991 *remainder = dividend % divisor;
83992 return dividend / divisor;
83993@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83994 /**
83995 * div64_u64 - unsigned 64bit divide with 64bit divisor
83996 */
83997-static inline u64 div64_u64(u64 dividend, u64 divisor)
83998+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83999 {
84000 return dividend / divisor;
84001 }
84002@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
84003 #define div64_ul(x, y) div_u64((x), (y))
84004
84005 #ifndef div_u64_rem
84006-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84007+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
84008 {
84009 *remainder = do_div(dividend, divisor);
84010 return dividend;
84011@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
84012 #endif
84013
84014 #ifndef div64_u64
84015-extern u64 div64_u64(u64 dividend, u64 divisor);
84016+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
84017 #endif
84018
84019 #ifndef div64_s64
84020@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
84021 * divide.
84022 */
84023 #ifndef div_u64
84024-static inline u64 div_u64(u64 dividend, u32 divisor)
84025+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
84026 {
84027 u32 remainder;
84028 return div_u64_rem(dividend, divisor, &remainder);
84029diff --git a/include/linux/memory.h b/include/linux/memory.h
84030index bb7384e..8b8d8d1 100644
84031--- a/include/linux/memory.h
84032+++ b/include/linux/memory.h
84033@@ -35,7 +35,7 @@ struct memory_block {
84034 };
84035
84036 int arch_get_memory_phys_device(unsigned long start_pfn);
84037-unsigned long __weak memory_block_size_bytes(void);
84038+unsigned long memory_block_size_bytes(void);
84039
84040 /* These states are exposed to userspace as text strings in sysfs */
84041 #define MEM_ONLINE (1<<0) /* exposed to userspace */
84042diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
84043index f230a97..714c006 100644
84044--- a/include/linux/mempolicy.h
84045+++ b/include/linux/mempolicy.h
84046@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
84047 }
84048
84049 #define vma_policy(vma) ((vma)->vm_policy)
84050+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84051+{
84052+ vma->vm_policy = pol;
84053+}
84054
84055 static inline void mpol_get(struct mempolicy *pol)
84056 {
84057@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
84058 }
84059
84060 #define vma_policy(vma) NULL
84061+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
84062+{
84063+}
84064
84065 static inline int
84066 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
84067diff --git a/include/linux/mm.h b/include/linux/mm.h
84068index 16e6f1e..d79d2f1 100644
84069--- a/include/linux/mm.h
84070+++ b/include/linux/mm.h
84071@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
84072 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
84073 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
84074 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
84075+
84076+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
84077+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
84078+#endif
84079+
84080 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
84081
84082 #ifdef CONFIG_MEM_SOFT_DIRTY
84083@@ -237,8 +242,8 @@ struct vm_operations_struct {
84084 /* called by access_process_vm when get_user_pages() fails, typically
84085 * for use by special VMAs that can switch between memory and hardware
84086 */
84087- int (*access)(struct vm_area_struct *vma, unsigned long addr,
84088- void *buf, int len, int write);
84089+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
84090+ void *buf, size_t len, int write);
84091
84092 /* Called by the /proc/PID/maps code to ask the vma whether it
84093 * has a special name. Returning non-NULL will also cause this
84094@@ -274,6 +279,7 @@ struct vm_operations_struct {
84095 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
84096 unsigned long size, pgoff_t pgoff);
84097 };
84098+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
84099
84100 struct mmu_gather;
84101 struct inode;
84102@@ -1163,8 +1169,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
84103 unsigned long *pfn);
84104 int follow_phys(struct vm_area_struct *vma, unsigned long address,
84105 unsigned int flags, unsigned long *prot, resource_size_t *phys);
84106-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84107- void *buf, int len, int write);
84108+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
84109+ void *buf, size_t len, int write);
84110
84111 static inline void unmap_shared_mapping_range(struct address_space *mapping,
84112 loff_t const holebegin, loff_t const holelen)
84113@@ -1203,9 +1209,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
84114 }
84115 #endif
84116
84117-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
84118-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84119- void *buf, int len, int write);
84120+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
84121+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84122+ void *buf, size_t len, int write);
84123
84124 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
84125 unsigned long start, unsigned long nr_pages,
84126@@ -1238,34 +1244,6 @@ int set_page_dirty_lock(struct page *page);
84127 int clear_page_dirty_for_io(struct page *page);
84128 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
84129
84130-/* Is the vma a continuation of the stack vma above it? */
84131-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
84132-{
84133- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
84134-}
84135-
84136-static inline int stack_guard_page_start(struct vm_area_struct *vma,
84137- unsigned long addr)
84138-{
84139- return (vma->vm_flags & VM_GROWSDOWN) &&
84140- (vma->vm_start == addr) &&
84141- !vma_growsdown(vma->vm_prev, addr);
84142-}
84143-
84144-/* Is the vma a continuation of the stack vma below it? */
84145-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
84146-{
84147- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
84148-}
84149-
84150-static inline int stack_guard_page_end(struct vm_area_struct *vma,
84151- unsigned long addr)
84152-{
84153- return (vma->vm_flags & VM_GROWSUP) &&
84154- (vma->vm_end == addr) &&
84155- !vma_growsup(vma->vm_next, addr);
84156-}
84157-
84158 extern pid_t
84159 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
84160
84161@@ -1365,6 +1343,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
84162 }
84163 #endif
84164
84165+#ifdef CONFIG_MMU
84166+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
84167+#else
84168+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
84169+{
84170+ return __pgprot(0);
84171+}
84172+#endif
84173+
84174 int vma_wants_writenotify(struct vm_area_struct *vma);
84175
84176 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
84177@@ -1383,8 +1370,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
84178 {
84179 return 0;
84180 }
84181+
84182+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
84183+ unsigned long address)
84184+{
84185+ return 0;
84186+}
84187 #else
84188 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84189+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
84190 #endif
84191
84192 #ifdef __PAGETABLE_PMD_FOLDED
84193@@ -1393,8 +1387,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
84194 {
84195 return 0;
84196 }
84197+
84198+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
84199+ unsigned long address)
84200+{
84201+ return 0;
84202+}
84203 #else
84204 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
84205+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
84206 #endif
84207
84208 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
84209@@ -1412,11 +1413,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
84210 NULL: pud_offset(pgd, address);
84211 }
84212
84213+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
84214+{
84215+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
84216+ NULL: pud_offset(pgd, address);
84217+}
84218+
84219 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
84220 {
84221 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
84222 NULL: pmd_offset(pud, address);
84223 }
84224+
84225+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
84226+{
84227+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
84228+ NULL: pmd_offset(pud, address);
84229+}
84230 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
84231
84232 #if USE_SPLIT_PTE_PTLOCKS
84233@@ -1815,7 +1828,7 @@ extern int install_special_mapping(struct mm_struct *mm,
84234 unsigned long addr, unsigned long len,
84235 unsigned long flags, struct page **pages);
84236
84237-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
84238+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
84239
84240 extern unsigned long mmap_region(struct file *file, unsigned long addr,
84241 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
84242@@ -1823,6 +1836,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
84243 unsigned long len, unsigned long prot, unsigned long flags,
84244 unsigned long pgoff, unsigned long *populate);
84245 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
84246+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
84247
84248 #ifdef CONFIG_MMU
84249 extern int __mm_populate(unsigned long addr, unsigned long len,
84250@@ -1851,10 +1865,11 @@ struct vm_unmapped_area_info {
84251 unsigned long high_limit;
84252 unsigned long align_mask;
84253 unsigned long align_offset;
84254+ unsigned long threadstack_offset;
84255 };
84256
84257-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
84258-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84259+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
84260+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
84261
84262 /*
84263 * Search for an unmapped address range.
84264@@ -1866,7 +1881,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
84265 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
84266 */
84267 static inline unsigned long
84268-vm_unmapped_area(struct vm_unmapped_area_info *info)
84269+vm_unmapped_area(const struct vm_unmapped_area_info *info)
84270 {
84271 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
84272 return unmapped_area(info);
84273@@ -1928,6 +1943,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
84274 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
84275 struct vm_area_struct **pprev);
84276
84277+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
84278+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
84279+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
84280+
84281 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
84282 NULL if none. Assume start_addr < end_addr. */
84283 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
84284@@ -1956,15 +1975,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
84285 return vma;
84286 }
84287
84288-#ifdef CONFIG_MMU
84289-pgprot_t vm_get_page_prot(unsigned long vm_flags);
84290-#else
84291-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
84292-{
84293- return __pgprot(0);
84294-}
84295-#endif
84296-
84297 #ifdef CONFIG_NUMA_BALANCING
84298 unsigned long change_prot_numa(struct vm_area_struct *vma,
84299 unsigned long start, unsigned long end);
84300@@ -2016,6 +2026,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
84301 static inline void vm_stat_account(struct mm_struct *mm,
84302 unsigned long flags, struct file *file, long pages)
84303 {
84304+
84305+#ifdef CONFIG_PAX_RANDMMAP
84306+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
84307+#endif
84308+
84309 mm->total_vm += pages;
84310 }
84311 #endif /* CONFIG_PROC_FS */
84312@@ -2104,7 +2119,7 @@ extern int unpoison_memory(unsigned long pfn);
84313 extern int sysctl_memory_failure_early_kill;
84314 extern int sysctl_memory_failure_recovery;
84315 extern void shake_page(struct page *p, int access);
84316-extern atomic_long_t num_poisoned_pages;
84317+extern atomic_long_unchecked_t num_poisoned_pages;
84318 extern int soft_offline_page(struct page *page, int flags);
84319
84320 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
84321@@ -2139,5 +2154,11 @@ void __init setup_nr_node_ids(void);
84322 static inline void setup_nr_node_ids(void) {}
84323 #endif
84324
84325+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84326+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
84327+#else
84328+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
84329+#endif
84330+
84331 #endif /* __KERNEL__ */
84332 #endif /* _LINUX_MM_H */
84333diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
84334index 6e0b286..90d9c0d 100644
84335--- a/include/linux/mm_types.h
84336+++ b/include/linux/mm_types.h
84337@@ -308,7 +308,9 @@ struct vm_area_struct {
84338 #ifdef CONFIG_NUMA
84339 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
84340 #endif
84341-};
84342+
84343+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
84344+} __randomize_layout;
84345
84346 struct core_thread {
84347 struct task_struct *task;
84348@@ -454,7 +456,25 @@ struct mm_struct {
84349 bool tlb_flush_pending;
84350 #endif
84351 struct uprobes_state uprobes_state;
84352-};
84353+
84354+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84355+ unsigned long pax_flags;
84356+#endif
84357+
84358+#ifdef CONFIG_PAX_DLRESOLVE
84359+ unsigned long call_dl_resolve;
84360+#endif
84361+
84362+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
84363+ unsigned long call_syscall;
84364+#endif
84365+
84366+#ifdef CONFIG_PAX_ASLR
84367+ unsigned long delta_mmap; /* randomized offset */
84368+ unsigned long delta_stack; /* randomized offset */
84369+#endif
84370+
84371+} __randomize_layout;
84372
84373 static inline void mm_init_cpumask(struct mm_struct *mm)
84374 {
84375diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
84376index c5d5278..f0b68c8 100644
84377--- a/include/linux/mmiotrace.h
84378+++ b/include/linux/mmiotrace.h
84379@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
84380 /* Called from ioremap.c */
84381 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
84382 void __iomem *addr);
84383-extern void mmiotrace_iounmap(volatile void __iomem *addr);
84384+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
84385
84386 /* For anyone to insert markers. Remember trailing newline. */
84387 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
84388@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
84389 {
84390 }
84391
84392-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
84393+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
84394 {
84395 }
84396
84397diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
84398index 318df70..b74ec01 100644
84399--- a/include/linux/mmzone.h
84400+++ b/include/linux/mmzone.h
84401@@ -518,7 +518,7 @@ struct zone {
84402
84403 ZONE_PADDING(_pad3_)
84404 /* Zone statistics */
84405- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84406+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
84407 } ____cacheline_internodealigned_in_smp;
84408
84409 typedef enum {
84410diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
84411index 44eeef0..a92d3f9 100644
84412--- a/include/linux/mod_devicetable.h
84413+++ b/include/linux/mod_devicetable.h
84414@@ -139,7 +139,7 @@ struct usb_device_id {
84415 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
84416 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
84417
84418-#define HID_ANY_ID (~0)
84419+#define HID_ANY_ID (~0U)
84420 #define HID_BUS_ANY 0xffff
84421 #define HID_GROUP_ANY 0x0000
84422
84423@@ -475,7 +475,7 @@ struct dmi_system_id {
84424 const char *ident;
84425 struct dmi_strmatch matches[4];
84426 void *driver_data;
84427-};
84428+} __do_const;
84429 /*
84430 * struct dmi_device_id appears during expansion of
84431 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
84432diff --git a/include/linux/module.h b/include/linux/module.h
84433index 71f282a..b2387e2 100644
84434--- a/include/linux/module.h
84435+++ b/include/linux/module.h
84436@@ -17,9 +17,11 @@
84437 #include <linux/moduleparam.h>
84438 #include <linux/jump_label.h>
84439 #include <linux/export.h>
84440+#include <linux/fs.h>
84441
84442 #include <linux/percpu.h>
84443 #include <asm/module.h>
84444+#include <asm/pgtable.h>
84445
84446 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
84447 #define MODULE_SIG_STRING "~Module signature appended~\n"
84448@@ -42,7 +44,7 @@ struct module_kobject {
84449 struct kobject *drivers_dir;
84450 struct module_param_attrs *mp;
84451 struct completion *kobj_completion;
84452-};
84453+} __randomize_layout;
84454
84455 struct module_attribute {
84456 struct attribute attr;
84457@@ -54,12 +56,13 @@ struct module_attribute {
84458 int (*test)(struct module *);
84459 void (*free)(struct module *);
84460 };
84461+typedef struct module_attribute __no_const module_attribute_no_const;
84462
84463 struct module_version_attribute {
84464 struct module_attribute mattr;
84465 const char *module_name;
84466 const char *version;
84467-} __attribute__ ((__aligned__(sizeof(void *))));
84468+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
84469
84470 extern ssize_t __modver_version_show(struct module_attribute *,
84471 struct module_kobject *, char *);
84472@@ -235,7 +238,7 @@ struct module {
84473
84474 /* Sysfs stuff. */
84475 struct module_kobject mkobj;
84476- struct module_attribute *modinfo_attrs;
84477+ module_attribute_no_const *modinfo_attrs;
84478 const char *version;
84479 const char *srcversion;
84480 struct kobject *holders_dir;
84481@@ -284,19 +287,16 @@ struct module {
84482 int (*init)(void);
84483
84484 /* If this is non-NULL, vfree after init() returns */
84485- void *module_init;
84486+ void *module_init_rx, *module_init_rw;
84487
84488 /* Here is the actual code + data, vfree'd on unload. */
84489- void *module_core;
84490+ void *module_core_rx, *module_core_rw;
84491
84492 /* Here are the sizes of the init and core sections */
84493- unsigned int init_size, core_size;
84494+ unsigned int init_size_rw, core_size_rw;
84495
84496 /* The size of the executable code in each section. */
84497- unsigned int init_text_size, core_text_size;
84498-
84499- /* Size of RO sections of the module (text+rodata) */
84500- unsigned int init_ro_size, core_ro_size;
84501+ unsigned int init_size_rx, core_size_rx;
84502
84503 /* Arch-specific module values */
84504 struct mod_arch_specific arch;
84505@@ -352,6 +352,10 @@ struct module {
84506 #ifdef CONFIG_EVENT_TRACING
84507 struct ftrace_event_call **trace_events;
84508 unsigned int num_trace_events;
84509+ struct file_operations trace_id;
84510+ struct file_operations trace_enable;
84511+ struct file_operations trace_format;
84512+ struct file_operations trace_filter;
84513 #endif
84514 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
84515 unsigned int num_ftrace_callsites;
84516@@ -375,7 +379,7 @@ struct module {
84517 ctor_fn_t *ctors;
84518 unsigned int num_ctors;
84519 #endif
84520-};
84521+} __randomize_layout;
84522 #ifndef MODULE_ARCH_INIT
84523 #define MODULE_ARCH_INIT {}
84524 #endif
84525@@ -396,18 +400,48 @@ bool is_module_address(unsigned long addr);
84526 bool is_module_percpu_address(unsigned long addr);
84527 bool is_module_text_address(unsigned long addr);
84528
84529+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
84530+{
84531+
84532+#ifdef CONFIG_PAX_KERNEXEC
84533+ if (ktla_ktva(addr) >= (unsigned long)start &&
84534+ ktla_ktva(addr) < (unsigned long)start + size)
84535+ return 1;
84536+#endif
84537+
84538+ return ((void *)addr >= start && (void *)addr < start + size);
84539+}
84540+
84541+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
84542+{
84543+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
84544+}
84545+
84546+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
84547+{
84548+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
84549+}
84550+
84551+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
84552+{
84553+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
84554+}
84555+
84556+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
84557+{
84558+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
84559+}
84560+
84561 static inline bool within_module_core(unsigned long addr,
84562 const struct module *mod)
84563 {
84564- return (unsigned long)mod->module_core <= addr &&
84565- addr < (unsigned long)mod->module_core + mod->core_size;
84566+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
84567 }
84568
84569 static inline bool within_module_init(unsigned long addr,
84570 const struct module *mod)
84571 {
84572- return (unsigned long)mod->module_init <= addr &&
84573- addr < (unsigned long)mod->module_init + mod->init_size;
84574+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
84575 }
84576
84577 static inline bool within_module(unsigned long addr, const struct module *mod)
84578diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
84579index 7eeb9bb..68f37e0 100644
84580--- a/include/linux/moduleloader.h
84581+++ b/include/linux/moduleloader.h
84582@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
84583 sections. Returns NULL on failure. */
84584 void *module_alloc(unsigned long size);
84585
84586+#ifdef CONFIG_PAX_KERNEXEC
84587+void *module_alloc_exec(unsigned long size);
84588+#else
84589+#define module_alloc_exec(x) module_alloc(x)
84590+#endif
84591+
84592 /* Free memory returned from module_alloc. */
84593 void module_free(struct module *mod, void *module_region);
84594
84595+#ifdef CONFIG_PAX_KERNEXEC
84596+void module_free_exec(struct module *mod, void *module_region);
84597+#else
84598+#define module_free_exec(x, y) module_free((x), (y))
84599+#endif
84600+
84601 /*
84602 * Apply the given relocation to the (simplified) ELF. Return -error
84603 * or 0.
84604@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
84605 unsigned int relsec,
84606 struct module *me)
84607 {
84608+#ifdef CONFIG_MODULES
84609 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84610 module_name(me));
84611+#endif
84612 return -ENOEXEC;
84613 }
84614 #endif
84615@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
84616 unsigned int relsec,
84617 struct module *me)
84618 {
84619+#ifdef CONFIG_MODULES
84620 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84621 module_name(me));
84622+#endif
84623 return -ENOEXEC;
84624 }
84625 #endif
84626diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
84627index 494f99e..5059f63 100644
84628--- a/include/linux/moduleparam.h
84629+++ b/include/linux/moduleparam.h
84630@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
84631 * @len is usually just sizeof(string).
84632 */
84633 #define module_param_string(name, string, len, perm) \
84634- static const struct kparam_string __param_string_##name \
84635+ static const struct kparam_string __param_string_##name __used \
84636 = { len, string }; \
84637 __module_param_call(MODULE_PARAM_PREFIX, name, \
84638 &param_ops_string, \
84639@@ -437,7 +437,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
84640 */
84641 #define module_param_array_named(name, array, type, nump, perm) \
84642 param_check_##type(name, &(array)[0]); \
84643- static const struct kparam_array __param_arr_##name \
84644+ static const struct kparam_array __param_arr_##name __used \
84645 = { .max = ARRAY_SIZE(array), .num = nump, \
84646 .ops = &param_ops_##type, \
84647 .elemsize = sizeof(array[0]), .elem = array }; \
84648diff --git a/include/linux/mount.h b/include/linux/mount.h
84649index 9262e4b..0a45f98 100644
84650--- a/include/linux/mount.h
84651+++ b/include/linux/mount.h
84652@@ -66,7 +66,7 @@ struct vfsmount {
84653 struct dentry *mnt_root; /* root of the mounted tree */
84654 struct super_block *mnt_sb; /* pointer to superblock */
84655 int mnt_flags;
84656-};
84657+} __randomize_layout;
84658
84659 struct file; /* forward dec */
84660 struct path;
84661diff --git a/include/linux/namei.h b/include/linux/namei.h
84662index 492de72..1bddcd4 100644
84663--- a/include/linux/namei.h
84664+++ b/include/linux/namei.h
84665@@ -19,7 +19,7 @@ struct nameidata {
84666 unsigned seq, m_seq;
84667 int last_type;
84668 unsigned depth;
84669- char *saved_names[MAX_NESTED_LINKS + 1];
84670+ const char *saved_names[MAX_NESTED_LINKS + 1];
84671 };
84672
84673 /*
84674@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
84675
84676 extern void nd_jump_link(struct nameidata *nd, struct path *path);
84677
84678-static inline void nd_set_link(struct nameidata *nd, char *path)
84679+static inline void nd_set_link(struct nameidata *nd, const char *path)
84680 {
84681 nd->saved_names[nd->depth] = path;
84682 }
84683
84684-static inline char *nd_get_link(struct nameidata *nd)
84685+static inline const char *nd_get_link(const struct nameidata *nd)
84686 {
84687 return nd->saved_names[nd->depth];
84688 }
84689diff --git a/include/linux/net.h b/include/linux/net.h
84690index 17d8339..81656c0 100644
84691--- a/include/linux/net.h
84692+++ b/include/linux/net.h
84693@@ -192,7 +192,7 @@ struct net_proto_family {
84694 int (*create)(struct net *net, struct socket *sock,
84695 int protocol, int kern);
84696 struct module *owner;
84697-};
84698+} __do_const;
84699
84700 struct iovec;
84701 struct kvec;
84702diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
84703index c8e388e..5d8cd9b 100644
84704--- a/include/linux/netdevice.h
84705+++ b/include/linux/netdevice.h
84706@@ -1147,6 +1147,7 @@ struct net_device_ops {
84707 void *priv);
84708 int (*ndo_get_lock_subclass)(struct net_device *dev);
84709 };
84710+typedef struct net_device_ops __no_const net_device_ops_no_const;
84711
84712 /**
84713 * enum net_device_priv_flags - &struct net_device priv_flags
84714@@ -1485,10 +1486,10 @@ struct net_device {
84715
84716 struct net_device_stats stats;
84717
84718- atomic_long_t rx_dropped;
84719- atomic_long_t tx_dropped;
84720+ atomic_long_unchecked_t rx_dropped;
84721+ atomic_long_unchecked_t tx_dropped;
84722
84723- atomic_t carrier_changes;
84724+ atomic_unchecked_t carrier_changes;
84725
84726 #ifdef CONFIG_WIRELESS_EXT
84727 const struct iw_handler_def * wireless_handlers;
84728diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
84729index 2517ece..0bbfcfb 100644
84730--- a/include/linux/netfilter.h
84731+++ b/include/linux/netfilter.h
84732@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
84733 #endif
84734 /* Use the module struct to lock set/get code in place */
84735 struct module *owner;
84736-};
84737+} __do_const;
84738
84739 /* Function to register/unregister hook points. */
84740 int nf_register_hook(struct nf_hook_ops *reg);
84741diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84742index e955d47..04a5338 100644
84743--- a/include/linux/netfilter/nfnetlink.h
84744+++ b/include/linux/netfilter/nfnetlink.h
84745@@ -19,7 +19,7 @@ struct nfnl_callback {
84746 const struct nlattr * const cda[]);
84747 const struct nla_policy *policy; /* netlink attribute policy */
84748 const u_int16_t attr_count; /* number of nlattr's */
84749-};
84750+} __do_const;
84751
84752 struct nfnetlink_subsystem {
84753 const char *name;
84754diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84755new file mode 100644
84756index 0000000..33f4af8
84757--- /dev/null
84758+++ b/include/linux/netfilter/xt_gradm.h
84759@@ -0,0 +1,9 @@
84760+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84761+#define _LINUX_NETFILTER_XT_GRADM_H 1
84762+
84763+struct xt_gradm_mtinfo {
84764+ __u16 flags;
84765+ __u16 invflags;
84766+};
84767+
84768+#endif
84769diff --git a/include/linux/nls.h b/include/linux/nls.h
84770index 520681b..2b7fabb 100644
84771--- a/include/linux/nls.h
84772+++ b/include/linux/nls.h
84773@@ -31,7 +31,7 @@ struct nls_table {
84774 const unsigned char *charset2upper;
84775 struct module *owner;
84776 struct nls_table *next;
84777-};
84778+} __do_const;
84779
84780 /* this value hold the maximum octet of charset */
84781 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84782@@ -46,7 +46,7 @@ enum utf16_endian {
84783 /* nls_base.c */
84784 extern int __register_nls(struct nls_table *, struct module *);
84785 extern int unregister_nls(struct nls_table *);
84786-extern struct nls_table *load_nls(char *);
84787+extern struct nls_table *load_nls(const char *);
84788 extern void unload_nls(struct nls_table *);
84789 extern struct nls_table *load_nls_default(void);
84790 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84791diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84792index d14a4c3..a078786 100644
84793--- a/include/linux/notifier.h
84794+++ b/include/linux/notifier.h
84795@@ -54,7 +54,8 @@ struct notifier_block {
84796 notifier_fn_t notifier_call;
84797 struct notifier_block __rcu *next;
84798 int priority;
84799-};
84800+} __do_const;
84801+typedef struct notifier_block __no_const notifier_block_no_const;
84802
84803 struct atomic_notifier_head {
84804 spinlock_t lock;
84805diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84806index b2a0f15..4d7da32 100644
84807--- a/include/linux/oprofile.h
84808+++ b/include/linux/oprofile.h
84809@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84810 int oprofilefs_create_ro_ulong(struct dentry * root,
84811 char const * name, ulong * val);
84812
84813-/** Create a file for read-only access to an atomic_t. */
84814+/** Create a file for read-only access to an atomic_unchecked_t. */
84815 int oprofilefs_create_ro_atomic(struct dentry * root,
84816- char const * name, atomic_t * val);
84817+ char const * name, atomic_unchecked_t * val);
84818
84819 /** create a directory */
84820 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84821diff --git a/include/linux/padata.h b/include/linux/padata.h
84822index 4386946..f50c615 100644
84823--- a/include/linux/padata.h
84824+++ b/include/linux/padata.h
84825@@ -129,7 +129,7 @@ struct parallel_data {
84826 struct padata_serial_queue __percpu *squeue;
84827 atomic_t reorder_objects;
84828 atomic_t refcnt;
84829- atomic_t seq_nr;
84830+ atomic_unchecked_t seq_nr;
84831 struct padata_cpumask cpumask;
84832 spinlock_t lock ____cacheline_aligned;
84833 unsigned int processed;
84834diff --git a/include/linux/path.h b/include/linux/path.h
84835index d137218..be0c176 100644
84836--- a/include/linux/path.h
84837+++ b/include/linux/path.h
84838@@ -1,13 +1,15 @@
84839 #ifndef _LINUX_PATH_H
84840 #define _LINUX_PATH_H
84841
84842+#include <linux/compiler.h>
84843+
84844 struct dentry;
84845 struct vfsmount;
84846
84847 struct path {
84848 struct vfsmount *mnt;
84849 struct dentry *dentry;
84850-};
84851+} __randomize_layout;
84852
84853 extern void path_get(const struct path *);
84854 extern void path_put(const struct path *);
84855diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84856index 5f2e559..7d59314 100644
84857--- a/include/linux/pci_hotplug.h
84858+++ b/include/linux/pci_hotplug.h
84859@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84860 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84861 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84862 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84863-};
84864+} __do_const;
84865+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84866
84867 /**
84868 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84869diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84870index 707617a..28a2e7e 100644
84871--- a/include/linux/perf_event.h
84872+++ b/include/linux/perf_event.h
84873@@ -339,8 +339,8 @@ struct perf_event {
84874
84875 enum perf_event_active_state state;
84876 unsigned int attach_state;
84877- local64_t count;
84878- atomic64_t child_count;
84879+ local64_t count; /* PaX: fix it one day */
84880+ atomic64_unchecked_t child_count;
84881
84882 /*
84883 * These are the total time in nanoseconds that the event
84884@@ -391,8 +391,8 @@ struct perf_event {
84885 * These accumulate total time (in nanoseconds) that children
84886 * events have been enabled and running, respectively.
84887 */
84888- atomic64_t child_total_time_enabled;
84889- atomic64_t child_total_time_running;
84890+ atomic64_unchecked_t child_total_time_enabled;
84891+ atomic64_unchecked_t child_total_time_running;
84892
84893 /*
84894 * Protect attach/detach and child_list:
84895@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84896 entry->ip[entry->nr++] = ip;
84897 }
84898
84899-extern int sysctl_perf_event_paranoid;
84900+extern int sysctl_perf_event_legitimately_concerned;
84901 extern int sysctl_perf_event_mlock;
84902 extern int sysctl_perf_event_sample_rate;
84903 extern int sysctl_perf_cpu_time_max_percent;
84904@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84905 loff_t *ppos);
84906
84907
84908+static inline bool perf_paranoid_any(void)
84909+{
84910+ return sysctl_perf_event_legitimately_concerned > 2;
84911+}
84912+
84913 static inline bool perf_paranoid_tracepoint_raw(void)
84914 {
84915- return sysctl_perf_event_paranoid > -1;
84916+ return sysctl_perf_event_legitimately_concerned > -1;
84917 }
84918
84919 static inline bool perf_paranoid_cpu(void)
84920 {
84921- return sysctl_perf_event_paranoid > 0;
84922+ return sysctl_perf_event_legitimately_concerned > 0;
84923 }
84924
84925 static inline bool perf_paranoid_kernel(void)
84926 {
84927- return sysctl_perf_event_paranoid > 1;
84928+ return sysctl_perf_event_legitimately_concerned > 1;
84929 }
84930
84931 extern void perf_event_init(void);
84932@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
84933 struct device_attribute attr;
84934 u64 id;
84935 const char *event_str;
84936-};
84937+} __do_const;
84938
84939 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84940 static struct perf_pmu_events_attr _var = { \
84941diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84942index 1997ffc..4f1f44d 100644
84943--- a/include/linux/pid_namespace.h
84944+++ b/include/linux/pid_namespace.h
84945@@ -44,7 +44,7 @@ struct pid_namespace {
84946 int hide_pid;
84947 int reboot; /* group exit code if this pidns was rebooted */
84948 unsigned int proc_inum;
84949-};
84950+} __randomize_layout;
84951
84952 extern struct pid_namespace init_pid_ns;
84953
84954diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84955index eb8b8ac..62649e1 100644
84956--- a/include/linux/pipe_fs_i.h
84957+++ b/include/linux/pipe_fs_i.h
84958@@ -47,10 +47,10 @@ struct pipe_inode_info {
84959 struct mutex mutex;
84960 wait_queue_head_t wait;
84961 unsigned int nrbufs, curbuf, buffers;
84962- unsigned int readers;
84963- unsigned int writers;
84964- unsigned int files;
84965- unsigned int waiting_writers;
84966+ atomic_t readers;
84967+ atomic_t writers;
84968+ atomic_t files;
84969+ atomic_t waiting_writers;
84970 unsigned int r_counter;
84971 unsigned int w_counter;
84972 struct page *tmp_page;
84973diff --git a/include/linux/pm.h b/include/linux/pm.h
84974index 72c0fe0..26918ed 100644
84975--- a/include/linux/pm.h
84976+++ b/include/linux/pm.h
84977@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
84978 struct dev_pm_domain {
84979 struct dev_pm_ops ops;
84980 };
84981+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84982
84983 /*
84984 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84985diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84986index ebc4c76..7fab7b0 100644
84987--- a/include/linux/pm_domain.h
84988+++ b/include/linux/pm_domain.h
84989@@ -44,11 +44,11 @@ struct gpd_dev_ops {
84990 int (*thaw_early)(struct device *dev);
84991 int (*thaw)(struct device *dev);
84992 bool (*active_wakeup)(struct device *dev);
84993-};
84994+} __no_const;
84995
84996 struct gpd_cpu_data {
84997 unsigned int saved_exit_latency;
84998- struct cpuidle_state *idle_state;
84999+ cpuidle_state_no_const *idle_state;
85000 };
85001
85002 struct generic_pm_domain {
85003diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
85004index 367f49b..d2f5a14 100644
85005--- a/include/linux/pm_runtime.h
85006+++ b/include/linux/pm_runtime.h
85007@@ -125,7 +125,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
85008
85009 static inline void pm_runtime_mark_last_busy(struct device *dev)
85010 {
85011- ACCESS_ONCE(dev->power.last_busy) = jiffies;
85012+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
85013 }
85014
85015 #else /* !CONFIG_PM_RUNTIME */
85016diff --git a/include/linux/pnp.h b/include/linux/pnp.h
85017index 195aafc..49a7bc2 100644
85018--- a/include/linux/pnp.h
85019+++ b/include/linux/pnp.h
85020@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
85021 struct pnp_fixup {
85022 char id[7];
85023 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
85024-};
85025+} __do_const;
85026
85027 /* config parameters */
85028 #define PNP_CONFIG_NORMAL 0x0001
85029diff --git a/include/linux/poison.h b/include/linux/poison.h
85030index 2110a81..13a11bb 100644
85031--- a/include/linux/poison.h
85032+++ b/include/linux/poison.h
85033@@ -19,8 +19,8 @@
85034 * under normal circumstances, used to verify that nobody uses
85035 * non-initialized list entries.
85036 */
85037-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
85038-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
85039+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
85040+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
85041
85042 /********** include/linux/timer.h **********/
85043 /*
85044diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
85045index d8b187c3..9a9257a 100644
85046--- a/include/linux/power/smartreflex.h
85047+++ b/include/linux/power/smartreflex.h
85048@@ -238,7 +238,7 @@ struct omap_sr_class_data {
85049 int (*notify)(struct omap_sr *sr, u32 status);
85050 u8 notify_flags;
85051 u8 class_type;
85052-};
85053+} __do_const;
85054
85055 /**
85056 * struct omap_sr_nvalue_table - Smartreflex n-target value info
85057diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
85058index 4ea1d37..80f4b33 100644
85059--- a/include/linux/ppp-comp.h
85060+++ b/include/linux/ppp-comp.h
85061@@ -84,7 +84,7 @@ struct compressor {
85062 struct module *owner;
85063 /* Extra skb space needed by the compressor algorithm */
85064 unsigned int comp_extra;
85065-};
85066+} __do_const;
85067
85068 /*
85069 * The return value from decompress routine is the length of the
85070diff --git a/include/linux/preempt.h b/include/linux/preempt.h
85071index de83b4e..c4b997d 100644
85072--- a/include/linux/preempt.h
85073+++ b/include/linux/preempt.h
85074@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
85075 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
85076 #endif
85077
85078+#define raw_preempt_count_add(val) __preempt_count_add(val)
85079+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
85080+
85081 #define __preempt_count_inc() __preempt_count_add(1)
85082 #define __preempt_count_dec() __preempt_count_sub(1)
85083
85084 #define preempt_count_inc() preempt_count_add(1)
85085+#define raw_preempt_count_inc() raw_preempt_count_add(1)
85086 #define preempt_count_dec() preempt_count_sub(1)
85087+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
85088
85089 #ifdef CONFIG_PREEMPT_COUNT
85090
85091@@ -41,6 +46,12 @@ do { \
85092 barrier(); \
85093 } while (0)
85094
85095+#define raw_preempt_disable() \
85096+do { \
85097+ raw_preempt_count_inc(); \
85098+ barrier(); \
85099+} while (0)
85100+
85101 #define sched_preempt_enable_no_resched() \
85102 do { \
85103 barrier(); \
85104@@ -49,6 +60,12 @@ do { \
85105
85106 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
85107
85108+#define raw_preempt_enable_no_resched() \
85109+do { \
85110+ barrier(); \
85111+ raw_preempt_count_dec(); \
85112+} while (0)
85113+
85114 #ifdef CONFIG_PREEMPT
85115 #define preempt_enable() \
85116 do { \
85117@@ -113,8 +130,10 @@ do { \
85118 * region.
85119 */
85120 #define preempt_disable() barrier()
85121+#define raw_preempt_disable() barrier()
85122 #define sched_preempt_enable_no_resched() barrier()
85123 #define preempt_enable_no_resched() barrier()
85124+#define raw_preempt_enable_no_resched() barrier()
85125 #define preempt_enable() barrier()
85126 #define preempt_check_resched() do { } while (0)
85127
85128@@ -128,11 +147,13 @@ do { \
85129 /*
85130 * Modules have no business playing preemption tricks.
85131 */
85132+#ifndef CONFIG_PAX_KERNEXEC
85133 #undef sched_preempt_enable_no_resched
85134 #undef preempt_enable_no_resched
85135 #undef preempt_enable_no_resched_notrace
85136 #undef preempt_check_resched
85137 #endif
85138+#endif
85139
85140 #define preempt_set_need_resched() \
85141 do { \
85142diff --git a/include/linux/printk.h b/include/linux/printk.h
85143index d78125f..7f36596 100644
85144--- a/include/linux/printk.h
85145+++ b/include/linux/printk.h
85146@@ -124,6 +124,8 @@ static inline __printf(1, 2) __cold
85147 void early_printk(const char *s, ...) { }
85148 #endif
85149
85150+extern int kptr_restrict;
85151+
85152 #ifdef CONFIG_PRINTK
85153 asmlinkage __printf(5, 0)
85154 int vprintk_emit(int facility, int level,
85155@@ -158,7 +160,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
85156
85157 extern int printk_delay_msec;
85158 extern int dmesg_restrict;
85159-extern int kptr_restrict;
85160
85161 extern void wake_up_klogd(void);
85162
85163diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
85164index 9d117f6..d832b31 100644
85165--- a/include/linux/proc_fs.h
85166+++ b/include/linux/proc_fs.h
85167@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
85168 extern struct proc_dir_entry *proc_symlink(const char *,
85169 struct proc_dir_entry *, const char *);
85170 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
85171+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
85172 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
85173 struct proc_dir_entry *, void *);
85174+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
85175+ struct proc_dir_entry *, void *);
85176 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
85177 struct proc_dir_entry *);
85178
85179@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
85180 return proc_create_data(name, mode, parent, proc_fops, NULL);
85181 }
85182
85183+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
85184+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
85185+{
85186+#ifdef CONFIG_GRKERNSEC_PROC_USER
85187+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
85188+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85189+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
85190+#else
85191+ return proc_create_data(name, mode, parent, proc_fops, NULL);
85192+#endif
85193+}
85194+
85195+
85196 extern void proc_set_size(struct proc_dir_entry *, loff_t);
85197 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
85198 extern void *PDE_DATA(const struct inode *);
85199@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
85200 struct proc_dir_entry *parent,const char *dest) { return NULL;}
85201 static inline struct proc_dir_entry *proc_mkdir(const char *name,
85202 struct proc_dir_entry *parent) {return NULL;}
85203+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
85204+ struct proc_dir_entry *parent) { return NULL; }
85205 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
85206 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85207+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
85208+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
85209 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
85210 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
85211 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
85212@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
85213 static inline struct proc_dir_entry *proc_net_mkdir(
85214 struct net *net, const char *name, struct proc_dir_entry *parent)
85215 {
85216- return proc_mkdir_data(name, 0, parent, net);
85217+ return proc_mkdir_data_restrict(name, 0, parent, net);
85218 }
85219
85220 #endif /* _LINUX_PROC_FS_H */
85221diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
85222index 34a1e10..70f6bde 100644
85223--- a/include/linux/proc_ns.h
85224+++ b/include/linux/proc_ns.h
85225@@ -14,7 +14,7 @@ struct proc_ns_operations {
85226 void (*put)(void *ns);
85227 int (*install)(struct nsproxy *nsproxy, void *ns);
85228 unsigned int (*inum)(void *ns);
85229-};
85230+} __do_const __randomize_layout;
85231
85232 struct proc_ns {
85233 void *ns;
85234diff --git a/include/linux/quota.h b/include/linux/quota.h
85235index 80d345a..9e89a9a 100644
85236--- a/include/linux/quota.h
85237+++ b/include/linux/quota.h
85238@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
85239
85240 extern bool qid_eq(struct kqid left, struct kqid right);
85241 extern bool qid_lt(struct kqid left, struct kqid right);
85242-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
85243+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
85244 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
85245 extern bool qid_valid(struct kqid qid);
85246
85247diff --git a/include/linux/random.h b/include/linux/random.h
85248index 57fbbff..2170304 100644
85249--- a/include/linux/random.h
85250+++ b/include/linux/random.h
85251@@ -9,9 +9,19 @@
85252 #include <uapi/linux/random.h>
85253
85254 extern void add_device_randomness(const void *, unsigned int);
85255+
85256+static inline void add_latent_entropy(void)
85257+{
85258+
85259+#ifdef LATENT_ENTROPY_PLUGIN
85260+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
85261+#endif
85262+
85263+}
85264+
85265 extern void add_input_randomness(unsigned int type, unsigned int code,
85266- unsigned int value);
85267-extern void add_interrupt_randomness(int irq, int irq_flags);
85268+ unsigned int value) __latent_entropy;
85269+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
85270
85271 extern void get_random_bytes(void *buf, int nbytes);
85272 extern void get_random_bytes_arch(void *buf, int nbytes);
85273@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
85274 extern const struct file_operations random_fops, urandom_fops;
85275 #endif
85276
85277-unsigned int get_random_int(void);
85278+unsigned int __intentional_overflow(-1) get_random_int(void);
85279 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
85280
85281-u32 prandom_u32(void);
85282+u32 prandom_u32(void) __intentional_overflow(-1);
85283 void prandom_bytes(void *buf, int nbytes);
85284 void prandom_seed(u32 seed);
85285 void prandom_reseed_late(void);
85286@@ -37,6 +47,11 @@ struct rnd_state {
85287 u32 prandom_u32_state(struct rnd_state *state);
85288 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85289
85290+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
85291+{
85292+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
85293+}
85294+
85295 /**
85296 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
85297 * @ep_ro: right open interval endpoint
85298@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
85299 *
85300 * Returns: pseudo-random number in interval [0, ep_ro)
85301 */
85302-static inline u32 prandom_u32_max(u32 ep_ro)
85303+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
85304 {
85305 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
85306 }
85307diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
85308index fea49b5..2ac22bb 100644
85309--- a/include/linux/rbtree_augmented.h
85310+++ b/include/linux/rbtree_augmented.h
85311@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
85312 old->rbaugmented = rbcompute(old); \
85313 } \
85314 rbstatic const struct rb_augment_callbacks rbname = { \
85315- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
85316+ .propagate = rbname ## _propagate, \
85317+ .copy = rbname ## _copy, \
85318+ .rotate = rbname ## _rotate \
85319 };
85320
85321
85322diff --git a/include/linux/rculist.h b/include/linux/rculist.h
85323index 372ad5e..d4373f8 100644
85324--- a/include/linux/rculist.h
85325+++ b/include/linux/rculist.h
85326@@ -29,8 +29,8 @@
85327 */
85328 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
85329 {
85330- ACCESS_ONCE(list->next) = list;
85331- ACCESS_ONCE(list->prev) = list;
85332+ ACCESS_ONCE_RW(list->next) = list;
85333+ ACCESS_ONCE_RW(list->prev) = list;
85334 }
85335
85336 /*
85337@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
85338 struct list_head *prev, struct list_head *next);
85339 #endif
85340
85341+void __pax_list_add_rcu(struct list_head *new,
85342+ struct list_head *prev, struct list_head *next);
85343+
85344 /**
85345 * list_add_rcu - add a new entry to rcu-protected list
85346 * @new: new entry to be added
85347@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
85348 __list_add_rcu(new, head, head->next);
85349 }
85350
85351+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
85352+{
85353+ __pax_list_add_rcu(new, head, head->next);
85354+}
85355+
85356 /**
85357 * list_add_tail_rcu - add a new entry to rcu-protected list
85358 * @new: new entry to be added
85359@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
85360 __list_add_rcu(new, head->prev, head);
85361 }
85362
85363+static inline void pax_list_add_tail_rcu(struct list_head *new,
85364+ struct list_head *head)
85365+{
85366+ __pax_list_add_rcu(new, head->prev, head);
85367+}
85368+
85369 /**
85370 * list_del_rcu - deletes entry from list without re-initialization
85371 * @entry: the element to delete from the list.
85372@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
85373 entry->prev = LIST_POISON2;
85374 }
85375
85376+extern void pax_list_del_rcu(struct list_head *entry);
85377+
85378 /**
85379 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
85380 * @n: the element to delete from the hash list.
85381diff --git a/include/linux/reboot.h b/include/linux/reboot.h
85382index 48bf152..d38b785 100644
85383--- a/include/linux/reboot.h
85384+++ b/include/linux/reboot.h
85385@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
85386 */
85387
85388 extern void migrate_to_reboot_cpu(void);
85389-extern void machine_restart(char *cmd);
85390-extern void machine_halt(void);
85391-extern void machine_power_off(void);
85392+extern void machine_restart(char *cmd) __noreturn;
85393+extern void machine_halt(void) __noreturn;
85394+extern void machine_power_off(void) __noreturn;
85395
85396 extern void machine_shutdown(void);
85397 struct pt_regs;
85398@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
85399 */
85400
85401 extern void kernel_restart_prepare(char *cmd);
85402-extern void kernel_restart(char *cmd);
85403-extern void kernel_halt(void);
85404-extern void kernel_power_off(void);
85405+extern void kernel_restart(char *cmd) __noreturn;
85406+extern void kernel_halt(void) __noreturn;
85407+extern void kernel_power_off(void) __noreturn;
85408
85409 extern int C_A_D; /* for sysctl */
85410 void ctrl_alt_del(void);
85411@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
85412 * Emergency restart, callable from an interrupt handler.
85413 */
85414
85415-extern void emergency_restart(void);
85416+extern void emergency_restart(void) __noreturn;
85417 #include <asm/emergency-restart.h>
85418
85419 #endif /* _LINUX_REBOOT_H */
85420diff --git a/include/linux/regset.h b/include/linux/regset.h
85421index 8e0c9fe..ac4d221 100644
85422--- a/include/linux/regset.h
85423+++ b/include/linux/regset.h
85424@@ -161,7 +161,8 @@ struct user_regset {
85425 unsigned int align;
85426 unsigned int bias;
85427 unsigned int core_note_type;
85428-};
85429+} __do_const;
85430+typedef struct user_regset __no_const user_regset_no_const;
85431
85432 /**
85433 * struct user_regset_view - available regsets
85434diff --git a/include/linux/relay.h b/include/linux/relay.h
85435index d7c8359..818daf5 100644
85436--- a/include/linux/relay.h
85437+++ b/include/linux/relay.h
85438@@ -157,7 +157,7 @@ struct rchan_callbacks
85439 * The callback should return 0 if successful, negative if not.
85440 */
85441 int (*remove_buf_file)(struct dentry *dentry);
85442-};
85443+} __no_const;
85444
85445 /*
85446 * CONFIG_RELAY kernel API, kernel/relay.c
85447diff --git a/include/linux/rio.h b/include/linux/rio.h
85448index 6bda06f..bf39a9b 100644
85449--- a/include/linux/rio.h
85450+++ b/include/linux/rio.h
85451@@ -358,7 +358,7 @@ struct rio_ops {
85452 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
85453 u64 rstart, u32 size, u32 flags);
85454 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
85455-};
85456+} __no_const;
85457
85458 #define RIO_RESOURCE_MEM 0x00000100
85459 #define RIO_RESOURCE_DOORBELL 0x00000200
85460diff --git a/include/linux/rmap.h b/include/linux/rmap.h
85461index be57450..31cf65e 100644
85462--- a/include/linux/rmap.h
85463+++ b/include/linux/rmap.h
85464@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
85465 void anon_vma_init(void); /* create anon_vma_cachep */
85466 int anon_vma_prepare(struct vm_area_struct *);
85467 void unlink_anon_vmas(struct vm_area_struct *);
85468-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
85469-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
85470+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
85471+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
85472
85473 static inline void anon_vma_merge(struct vm_area_struct *vma,
85474 struct vm_area_struct *next)
85475diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
85476index ed8f9e7..999bc96 100644
85477--- a/include/linux/scatterlist.h
85478+++ b/include/linux/scatterlist.h
85479@@ -1,6 +1,7 @@
85480 #ifndef _LINUX_SCATTERLIST_H
85481 #define _LINUX_SCATTERLIST_H
85482
85483+#include <linux/sched.h>
85484 #include <linux/string.h>
85485 #include <linux/bug.h>
85486 #include <linux/mm.h>
85487@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
85488 #ifdef CONFIG_DEBUG_SG
85489 BUG_ON(!virt_addr_valid(buf));
85490 #endif
85491+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85492+ if (object_starts_on_stack(buf)) {
85493+ void *adjbuf = buf - current->stack + current->lowmem_stack;
85494+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
85495+ } else
85496+#endif
85497 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
85498 }
85499
85500diff --git a/include/linux/sched.h b/include/linux/sched.h
85501index 2b1d9e9..10ba706 100644
85502--- a/include/linux/sched.h
85503+++ b/include/linux/sched.h
85504@@ -132,6 +132,7 @@ struct fs_struct;
85505 struct perf_event_context;
85506 struct blk_plug;
85507 struct filename;
85508+struct linux_binprm;
85509
85510 #define VMACACHE_BITS 2
85511 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
85512@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
85513 extern int in_sched_functions(unsigned long addr);
85514
85515 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
85516-extern signed long schedule_timeout(signed long timeout);
85517+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
85518 extern signed long schedule_timeout_interruptible(signed long timeout);
85519 extern signed long schedule_timeout_killable(signed long timeout);
85520 extern signed long schedule_timeout_uninterruptible(signed long timeout);
85521@@ -385,6 +386,19 @@ struct nsproxy;
85522 struct user_namespace;
85523
85524 #ifdef CONFIG_MMU
85525+
85526+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
85527+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
85528+#else
85529+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
85530+{
85531+ return 0;
85532+}
85533+#endif
85534+
85535+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
85536+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
85537+
85538 extern void arch_pick_mmap_layout(struct mm_struct *mm);
85539 extern unsigned long
85540 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
85541@@ -682,6 +696,17 @@ struct signal_struct {
85542 #ifdef CONFIG_TASKSTATS
85543 struct taskstats *stats;
85544 #endif
85545+
85546+#ifdef CONFIG_GRKERNSEC
85547+ u32 curr_ip;
85548+ u32 saved_ip;
85549+ u32 gr_saddr;
85550+ u32 gr_daddr;
85551+ u16 gr_sport;
85552+ u16 gr_dport;
85553+ u8 used_accept:1;
85554+#endif
85555+
85556 #ifdef CONFIG_AUDIT
85557 unsigned audit_tty;
85558 unsigned audit_tty_log_passwd;
85559@@ -708,7 +733,7 @@ struct signal_struct {
85560 struct mutex cred_guard_mutex; /* guard against foreign influences on
85561 * credential calculations
85562 * (notably. ptrace) */
85563-};
85564+} __randomize_layout;
85565
85566 /*
85567 * Bits in flags field of signal_struct.
85568@@ -761,6 +786,14 @@ struct user_struct {
85569 struct key *session_keyring; /* UID's default session keyring */
85570 #endif
85571
85572+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
85573+ unsigned char kernel_banned;
85574+#endif
85575+#ifdef CONFIG_GRKERNSEC_BRUTE
85576+ unsigned char suid_banned;
85577+ unsigned long suid_ban_expires;
85578+#endif
85579+
85580 /* Hash table maintenance information */
85581 struct hlist_node uidhash_node;
85582 kuid_t uid;
85583@@ -768,7 +801,7 @@ struct user_struct {
85584 #ifdef CONFIG_PERF_EVENTS
85585 atomic_long_t locked_vm;
85586 #endif
85587-};
85588+} __randomize_layout;
85589
85590 extern int uids_sysfs_init(void);
85591
85592@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
85593 struct task_struct {
85594 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
85595 void *stack;
85596+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85597+ void *lowmem_stack;
85598+#endif
85599 atomic_t usage;
85600 unsigned int flags; /* per process flags, defined below */
85601 unsigned int ptrace;
85602@@ -1345,8 +1381,8 @@ struct task_struct {
85603 struct list_head thread_node;
85604
85605 struct completion *vfork_done; /* for vfork() */
85606- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
85607- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85608+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
85609+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85610
85611 cputime_t utime, stime, utimescaled, stimescaled;
85612 cputime_t gtime;
85613@@ -1371,11 +1407,6 @@ struct task_struct {
85614 struct task_cputime cputime_expires;
85615 struct list_head cpu_timers[3];
85616
85617-/* process credentials */
85618- const struct cred __rcu *real_cred; /* objective and real subjective task
85619- * credentials (COW) */
85620- const struct cred __rcu *cred; /* effective (overridable) subjective task
85621- * credentials (COW) */
85622 char comm[TASK_COMM_LEN]; /* executable name excluding path
85623 - access with [gs]et_task_comm (which lock
85624 it with task_lock())
85625@@ -1393,6 +1424,10 @@ struct task_struct {
85626 #endif
85627 /* CPU-specific state of this task */
85628 struct thread_struct thread;
85629+/* thread_info moved to task_struct */
85630+#ifdef CONFIG_X86
85631+ struct thread_info tinfo;
85632+#endif
85633 /* filesystem information */
85634 struct fs_struct *fs;
85635 /* open file information */
85636@@ -1467,6 +1502,10 @@ struct task_struct {
85637 gfp_t lockdep_reclaim_gfp;
85638 #endif
85639
85640+/* process credentials */
85641+ const struct cred __rcu *real_cred; /* objective and real subjective task
85642+ * credentials (COW) */
85643+
85644 /* journalling filesystem info */
85645 void *journal_info;
85646
85647@@ -1505,6 +1544,10 @@ struct task_struct {
85648 /* cg_list protected by css_set_lock and tsk->alloc_lock */
85649 struct list_head cg_list;
85650 #endif
85651+
85652+ const struct cred __rcu *cred; /* effective (overridable) subjective task
85653+ * credentials (COW) */
85654+
85655 #ifdef CONFIG_FUTEX
85656 struct robust_list_head __user *robust_list;
85657 #ifdef CONFIG_COMPAT
85658@@ -1644,7 +1687,78 @@ struct task_struct {
85659 unsigned int sequential_io;
85660 unsigned int sequential_io_avg;
85661 #endif
85662-};
85663+
85664+#ifdef CONFIG_GRKERNSEC
85665+ /* grsecurity */
85666+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85667+ u64 exec_id;
85668+#endif
85669+#ifdef CONFIG_GRKERNSEC_SETXID
85670+ const struct cred *delayed_cred;
85671+#endif
85672+ struct dentry *gr_chroot_dentry;
85673+ struct acl_subject_label *acl;
85674+ struct acl_subject_label *tmpacl;
85675+ struct acl_role_label *role;
85676+ struct file *exec_file;
85677+ unsigned long brute_expires;
85678+ u16 acl_role_id;
85679+ u8 inherited;
85680+ /* is this the task that authenticated to the special role */
85681+ u8 acl_sp_role;
85682+ u8 is_writable;
85683+ u8 brute;
85684+ u8 gr_is_chrooted;
85685+#endif
85686+
85687+} __randomize_layout;
85688+
85689+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
85690+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
85691+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
85692+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
85693+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
85694+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
85695+
85696+#ifdef CONFIG_PAX_SOFTMODE
85697+extern int pax_softmode;
85698+#endif
85699+
85700+extern int pax_check_flags(unsigned long *);
85701+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
85702+
85703+/* if tsk != current then task_lock must be held on it */
85704+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85705+static inline unsigned long pax_get_flags(struct task_struct *tsk)
85706+{
85707+ if (likely(tsk->mm))
85708+ return tsk->mm->pax_flags;
85709+ else
85710+ return 0UL;
85711+}
85712+
85713+/* if tsk != current then task_lock must be held on it */
85714+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
85715+{
85716+ if (likely(tsk->mm)) {
85717+ tsk->mm->pax_flags = flags;
85718+ return 0;
85719+ }
85720+ return -EINVAL;
85721+}
85722+#endif
85723+
85724+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
85725+extern void pax_set_initial_flags(struct linux_binprm *bprm);
85726+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
85727+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
85728+#endif
85729+
85730+struct path;
85731+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85732+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85733+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85734+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85735
85736 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85737 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85738@@ -1726,7 +1840,7 @@ struct pid_namespace;
85739 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85740 struct pid_namespace *ns);
85741
85742-static inline pid_t task_pid_nr(struct task_struct *tsk)
85743+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85744 {
85745 return tsk->pid;
85746 }
85747@@ -2097,6 +2211,25 @@ extern u64 sched_clock_cpu(int cpu);
85748
85749 extern void sched_clock_init(void);
85750
85751+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85752+static inline void populate_stack(void)
85753+{
85754+ struct task_struct *curtask = current;
85755+ int c;
85756+ int *ptr = curtask->stack;
85757+ int *end = curtask->stack + THREAD_SIZE;
85758+
85759+ while (ptr < end) {
85760+ c = *(volatile int *)ptr;
85761+ ptr += PAGE_SIZE/sizeof(int);
85762+ }
85763+}
85764+#else
85765+static inline void populate_stack(void)
85766+{
85767+}
85768+#endif
85769+
85770 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85771 static inline void sched_clock_tick(void)
85772 {
85773@@ -2230,7 +2363,9 @@ void yield(void);
85774 extern struct exec_domain default_exec_domain;
85775
85776 union thread_union {
85777+#ifndef CONFIG_X86
85778 struct thread_info thread_info;
85779+#endif
85780 unsigned long stack[THREAD_SIZE/sizeof(long)];
85781 };
85782
85783@@ -2263,6 +2398,7 @@ extern struct pid_namespace init_pid_ns;
85784 */
85785
85786 extern struct task_struct *find_task_by_vpid(pid_t nr);
85787+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85788 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85789 struct pid_namespace *ns);
85790
85791@@ -2427,7 +2563,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85792 extern void exit_itimers(struct signal_struct *);
85793 extern void flush_itimer_signals(void);
85794
85795-extern void do_group_exit(int);
85796+extern __noreturn void do_group_exit(int);
85797
85798 extern int do_execve(struct filename *,
85799 const char __user * const __user *,
85800@@ -2642,9 +2778,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85801
85802 #endif
85803
85804-static inline int object_is_on_stack(void *obj)
85805+static inline int object_starts_on_stack(const void *obj)
85806 {
85807- void *stack = task_stack_page(current);
85808+ const void *stack = task_stack_page(current);
85809
85810 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85811 }
85812diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85813index 596a0e0..bea77ec 100644
85814--- a/include/linux/sched/sysctl.h
85815+++ b/include/linux/sched/sysctl.h
85816@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85817 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85818
85819 extern int sysctl_max_map_count;
85820+extern unsigned long sysctl_heap_stack_gap;
85821
85822 extern unsigned int sysctl_sched_latency;
85823 extern unsigned int sysctl_sched_min_granularity;
85824diff --git a/include/linux/security.h b/include/linux/security.h
85825index 623f90e..90b39da 100644
85826--- a/include/linux/security.h
85827+++ b/include/linux/security.h
85828@@ -27,6 +27,7 @@
85829 #include <linux/slab.h>
85830 #include <linux/err.h>
85831 #include <linux/string.h>
85832+#include <linux/grsecurity.h>
85833
85834 struct linux_binprm;
85835 struct cred;
85836@@ -116,8 +117,6 @@ struct seq_file;
85837
85838 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85839
85840-void reset_security_ops(void);
85841-
85842 #ifdef CONFIG_MMU
85843 extern unsigned long mmap_min_addr;
85844 extern unsigned long dac_mmap_min_addr;
85845@@ -1729,7 +1728,7 @@ struct security_operations {
85846 struct audit_context *actx);
85847 void (*audit_rule_free) (void *lsmrule);
85848 #endif /* CONFIG_AUDIT */
85849-};
85850+} __randomize_layout;
85851
85852 /* prototypes */
85853 extern int security_init(void);
85854diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85855index dc368b8..e895209 100644
85856--- a/include/linux/semaphore.h
85857+++ b/include/linux/semaphore.h
85858@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85859 }
85860
85861 extern void down(struct semaphore *sem);
85862-extern int __must_check down_interruptible(struct semaphore *sem);
85863+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85864 extern int __must_check down_killable(struct semaphore *sem);
85865 extern int __must_check down_trylock(struct semaphore *sem);
85866 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85867diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85868index 52e0097..383f21d 100644
85869--- a/include/linux/seq_file.h
85870+++ b/include/linux/seq_file.h
85871@@ -27,6 +27,9 @@ struct seq_file {
85872 struct mutex lock;
85873 const struct seq_operations *op;
85874 int poll_event;
85875+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85876+ u64 exec_id;
85877+#endif
85878 #ifdef CONFIG_USER_NS
85879 struct user_namespace *user_ns;
85880 #endif
85881@@ -39,6 +42,7 @@ struct seq_operations {
85882 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85883 int (*show) (struct seq_file *m, void *v);
85884 };
85885+typedef struct seq_operations __no_const seq_operations_no_const;
85886
85887 #define SEQ_SKIP 1
85888
85889@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
85890
85891 char *mangle_path(char *s, const char *p, const char *esc);
85892 int seq_open(struct file *, const struct seq_operations *);
85893+int seq_open_restrict(struct file *, const struct seq_operations *);
85894 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85895 loff_t seq_lseek(struct file *, loff_t, int);
85896 int seq_release(struct inode *, struct file *);
85897@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85898 }
85899
85900 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85901+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85902 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85903 int single_release(struct inode *, struct file *);
85904 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85905diff --git a/include/linux/shm.h b/include/linux/shm.h
85906index 6fb8016..ab4465e 100644
85907--- a/include/linux/shm.h
85908+++ b/include/linux/shm.h
85909@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85910 /* The task created the shm object. NULL if the task is dead. */
85911 struct task_struct *shm_creator;
85912 struct list_head shm_clist; /* list by creator */
85913+#ifdef CONFIG_GRKERNSEC
85914+ u64 shm_createtime;
85915+ pid_t shm_lapid;
85916+#endif
85917 };
85918
85919 /* shm_mode upper byte flags */
85920diff --git a/include/linux/signal.h b/include/linux/signal.h
85921index 750196f..ae7a3a4 100644
85922--- a/include/linux/signal.h
85923+++ b/include/linux/signal.h
85924@@ -292,7 +292,7 @@ static inline void allow_signal(int sig)
85925 * know it'll be handled, so that they don't get converted to
85926 * SIGKILL or just silently dropped.
85927 */
85928- kernel_sigaction(sig, (__force __sighandler_t)2);
85929+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85930 }
85931
85932 static inline void disallow_signal(int sig)
85933diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85934index abde271..bc9ece1 100644
85935--- a/include/linux/skbuff.h
85936+++ b/include/linux/skbuff.h
85937@@ -728,7 +728,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85938 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85939 int node);
85940 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85941-static inline struct sk_buff *alloc_skb(unsigned int size,
85942+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85943 gfp_t priority)
85944 {
85945 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85946@@ -1845,7 +1845,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85947 return skb->inner_transport_header - skb->inner_network_header;
85948 }
85949
85950-static inline int skb_network_offset(const struct sk_buff *skb)
85951+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85952 {
85953 return skb_network_header(skb) - skb->data;
85954 }
85955@@ -1917,7 +1917,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
85956 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85957 */
85958 #ifndef NET_SKB_PAD
85959-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85960+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85961 #endif
85962
85963 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85964@@ -2524,7 +2524,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85965 int *err);
85966 unsigned int datagram_poll(struct file *file, struct socket *sock,
85967 struct poll_table_struct *wait);
85968-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85969+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85970 struct iovec *to, int size);
85971 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
85972 struct iovec *iov);
85973@@ -2918,6 +2918,9 @@ static inline void nf_reset(struct sk_buff *skb)
85974 nf_bridge_put(skb->nf_bridge);
85975 skb->nf_bridge = NULL;
85976 #endif
85977+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85978+ skb->nf_trace = 0;
85979+#endif
85980 }
85981
85982 static inline void nf_reset_trace(struct sk_buff *skb)
85983diff --git a/include/linux/slab.h b/include/linux/slab.h
85984index 1d9abb7..b1e8b10 100644
85985--- a/include/linux/slab.h
85986+++ b/include/linux/slab.h
85987@@ -14,15 +14,29 @@
85988 #include <linux/gfp.h>
85989 #include <linux/types.h>
85990 #include <linux/workqueue.h>
85991-
85992+#include <linux/err.h>
85993
85994 /*
85995 * Flags to pass to kmem_cache_create().
85996 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85997 */
85998 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85999+
86000+#ifdef CONFIG_PAX_USERCOPY_SLABS
86001+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
86002+#else
86003+#define SLAB_USERCOPY 0x00000000UL
86004+#endif
86005+
86006 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
86007 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
86008+
86009+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86010+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
86011+#else
86012+#define SLAB_NO_SANITIZE 0x00000000UL
86013+#endif
86014+
86015 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
86016 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
86017 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
86018@@ -98,10 +112,13 @@
86019 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
86020 * Both make kfree a no-op.
86021 */
86022-#define ZERO_SIZE_PTR ((void *)16)
86023+#define ZERO_SIZE_PTR \
86024+({ \
86025+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
86026+ (void *)(-MAX_ERRNO-1L); \
86027+})
86028
86029-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
86030- (unsigned long)ZERO_SIZE_PTR)
86031+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
86032
86033 #include <linux/kmemleak.h>
86034
86035@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
86036 void kfree(const void *);
86037 void kzfree(const void *);
86038 size_t ksize(const void *);
86039+const char *check_heap_object(const void *ptr, unsigned long n);
86040+bool is_usercopy_object(const void *ptr);
86041
86042 /*
86043 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
86044@@ -176,7 +195,7 @@ struct kmem_cache {
86045 unsigned int align; /* Alignment as calculated */
86046 unsigned long flags; /* Active flags on the slab */
86047 const char *name; /* Slab name for sysfs */
86048- int refcount; /* Use counter */
86049+ atomic_t refcount; /* Use counter */
86050 void (*ctor)(void *); /* Called on object slot creation */
86051 struct list_head list; /* List of all slab caches on the system */
86052 };
86053@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
86054 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86055 #endif
86056
86057+#ifdef CONFIG_PAX_USERCOPY_SLABS
86058+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
86059+#endif
86060+
86061 /*
86062 * Figure out which kmalloc slab an allocation of a certain size
86063 * belongs to.
86064@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
86065 * 2 = 120 .. 192 bytes
86066 * n = 2^(n-1) .. 2^n -1
86067 */
86068-static __always_inline int kmalloc_index(size_t size)
86069+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
86070 {
86071 if (!size)
86072 return 0;
86073@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
86074 }
86075 #endif /* !CONFIG_SLOB */
86076
86077-void *__kmalloc(size_t size, gfp_t flags);
86078+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
86079 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
86080
86081 #ifdef CONFIG_NUMA
86082-void *__kmalloc_node(size_t size, gfp_t flags, int node);
86083+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
86084 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
86085 #else
86086 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
86087diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
86088index 8235dfb..47ce586 100644
86089--- a/include/linux/slab_def.h
86090+++ b/include/linux/slab_def.h
86091@@ -38,7 +38,7 @@ struct kmem_cache {
86092 /* 4) cache creation/removal */
86093 const char *name;
86094 struct list_head list;
86095- int refcount;
86096+ atomic_t refcount;
86097 int object_size;
86098 int align;
86099
86100@@ -54,10 +54,14 @@ struct kmem_cache {
86101 unsigned long node_allocs;
86102 unsigned long node_frees;
86103 unsigned long node_overflow;
86104- atomic_t allochit;
86105- atomic_t allocmiss;
86106- atomic_t freehit;
86107- atomic_t freemiss;
86108+ atomic_unchecked_t allochit;
86109+ atomic_unchecked_t allocmiss;
86110+ atomic_unchecked_t freehit;
86111+ atomic_unchecked_t freemiss;
86112+#ifdef CONFIG_PAX_MEMORY_SANITIZE
86113+ atomic_unchecked_t sanitized;
86114+ atomic_unchecked_t not_sanitized;
86115+#endif
86116
86117 /*
86118 * If debugging is enabled, then the allocator can add additional
86119diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
86120index d82abd4..408c3a0 100644
86121--- a/include/linux/slub_def.h
86122+++ b/include/linux/slub_def.h
86123@@ -74,7 +74,7 @@ struct kmem_cache {
86124 struct kmem_cache_order_objects max;
86125 struct kmem_cache_order_objects min;
86126 gfp_t allocflags; /* gfp flags to use on each alloc */
86127- int refcount; /* Refcount for slab cache destroy */
86128+ atomic_t refcount; /* Refcount for slab cache destroy */
86129 void (*ctor)(void *);
86130 int inuse; /* Offset to metadata */
86131 int align; /* Alignment */
86132diff --git a/include/linux/smp.h b/include/linux/smp.h
86133index 34347f2..8739978 100644
86134--- a/include/linux/smp.h
86135+++ b/include/linux/smp.h
86136@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
86137 #endif
86138
86139 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
86140+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
86141 #define put_cpu() preempt_enable()
86142+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
86143
86144 /*
86145 * Callback to arch code if there's nosmp or maxcpus=0 on the
86146diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
86147index 46cca4c..3323536 100644
86148--- a/include/linux/sock_diag.h
86149+++ b/include/linux/sock_diag.h
86150@@ -11,7 +11,7 @@ struct sock;
86151 struct sock_diag_handler {
86152 __u8 family;
86153 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
86154-};
86155+} __do_const;
86156
86157 int sock_diag_register(const struct sock_diag_handler *h);
86158 void sock_diag_unregister(const struct sock_diag_handler *h);
86159diff --git a/include/linux/sonet.h b/include/linux/sonet.h
86160index 680f9a3..f13aeb0 100644
86161--- a/include/linux/sonet.h
86162+++ b/include/linux/sonet.h
86163@@ -7,7 +7,7 @@
86164 #include <uapi/linux/sonet.h>
86165
86166 struct k_sonet_stats {
86167-#define __HANDLE_ITEM(i) atomic_t i
86168+#define __HANDLE_ITEM(i) atomic_unchecked_t i
86169 __SONET_ITEMS
86170 #undef __HANDLE_ITEM
86171 };
86172diff --git a/include/linux/string.h b/include/linux/string.h
86173index d36977e..3b42b37 100644
86174--- a/include/linux/string.h
86175+++ b/include/linux/string.h
86176@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
86177 #endif
86178
86179 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
86180- const void *from, size_t available);
86181+ const void *from, size_t available);
86182
86183 /**
86184 * strstarts - does @str start with @prefix?
86185@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
86186 return strncmp(str, prefix, strlen(prefix)) == 0;
86187 }
86188
86189-extern size_t memweight(const void *ptr, size_t bytes);
86190+size_t memweight(const void *ptr, size_t bytes);
86191+void memzero_explicit(void *s, size_t count);
86192
86193 /**
86194 * kbasename - return the last part of a pathname.
86195diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
86196index 07d8e53..dc934c9 100644
86197--- a/include/linux/sunrpc/addr.h
86198+++ b/include/linux/sunrpc/addr.h
86199@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
86200 {
86201 switch (sap->sa_family) {
86202 case AF_INET:
86203- return ntohs(((struct sockaddr_in *)sap)->sin_port);
86204+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
86205 case AF_INET6:
86206- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
86207+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
86208 }
86209 return 0;
86210 }
86211@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
86212 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
86213 const struct sockaddr *src)
86214 {
86215- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
86216+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
86217 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
86218
86219 dsin->sin_family = ssin->sin_family;
86220@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
86221 if (sa->sa_family != AF_INET6)
86222 return 0;
86223
86224- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
86225+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
86226 }
86227
86228 #endif /* _LINUX_SUNRPC_ADDR_H */
86229diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
86230index 70736b9..37f33db 100644
86231--- a/include/linux/sunrpc/clnt.h
86232+++ b/include/linux/sunrpc/clnt.h
86233@@ -97,7 +97,7 @@ struct rpc_procinfo {
86234 unsigned int p_timer; /* Which RTT timer to use */
86235 u32 p_statidx; /* Which procedure to account */
86236 const char * p_name; /* name of procedure */
86237-};
86238+} __do_const;
86239
86240 #ifdef __KERNEL__
86241
86242diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
86243index cf61ecd..a4a9bc0 100644
86244--- a/include/linux/sunrpc/svc.h
86245+++ b/include/linux/sunrpc/svc.h
86246@@ -417,7 +417,7 @@ struct svc_procedure {
86247 unsigned int pc_count; /* call count */
86248 unsigned int pc_cachetype; /* cache info (NFS) */
86249 unsigned int pc_xdrressize; /* maximum size of XDR reply */
86250-};
86251+} __do_const;
86252
86253 /*
86254 * Function prototypes.
86255diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
86256index 975da75..318c083 100644
86257--- a/include/linux/sunrpc/svc_rdma.h
86258+++ b/include/linux/sunrpc/svc_rdma.h
86259@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
86260 extern unsigned int svcrdma_max_requests;
86261 extern unsigned int svcrdma_max_req_size;
86262
86263-extern atomic_t rdma_stat_recv;
86264-extern atomic_t rdma_stat_read;
86265-extern atomic_t rdma_stat_write;
86266-extern atomic_t rdma_stat_sq_starve;
86267-extern atomic_t rdma_stat_rq_starve;
86268-extern atomic_t rdma_stat_rq_poll;
86269-extern atomic_t rdma_stat_rq_prod;
86270-extern atomic_t rdma_stat_sq_poll;
86271-extern atomic_t rdma_stat_sq_prod;
86272+extern atomic_unchecked_t rdma_stat_recv;
86273+extern atomic_unchecked_t rdma_stat_read;
86274+extern atomic_unchecked_t rdma_stat_write;
86275+extern atomic_unchecked_t rdma_stat_sq_starve;
86276+extern atomic_unchecked_t rdma_stat_rq_starve;
86277+extern atomic_unchecked_t rdma_stat_rq_poll;
86278+extern atomic_unchecked_t rdma_stat_rq_prod;
86279+extern atomic_unchecked_t rdma_stat_sq_poll;
86280+extern atomic_unchecked_t rdma_stat_sq_prod;
86281
86282 #define RPCRDMA_VERSION 1
86283
86284diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
86285index 8d71d65..f79586e 100644
86286--- a/include/linux/sunrpc/svcauth.h
86287+++ b/include/linux/sunrpc/svcauth.h
86288@@ -120,7 +120,7 @@ struct auth_ops {
86289 int (*release)(struct svc_rqst *rq);
86290 void (*domain_release)(struct auth_domain *);
86291 int (*set_client)(struct svc_rqst *rq);
86292-};
86293+} __do_const;
86294
86295 #define SVC_GARBAGE 1
86296 #define SVC_SYSERR 2
86297diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
86298index e7a018e..49f8b17 100644
86299--- a/include/linux/swiotlb.h
86300+++ b/include/linux/swiotlb.h
86301@@ -60,7 +60,8 @@ extern void
86302
86303 extern void
86304 swiotlb_free_coherent(struct device *hwdev, size_t size,
86305- void *vaddr, dma_addr_t dma_handle);
86306+ void *vaddr, dma_addr_t dma_handle,
86307+ struct dma_attrs *attrs);
86308
86309 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
86310 unsigned long offset, size_t size,
86311diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
86312index 0f86d85..dff3419 100644
86313--- a/include/linux/syscalls.h
86314+++ b/include/linux/syscalls.h
86315@@ -98,10 +98,16 @@ struct sigaltstack;
86316 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
86317
86318 #define __SC_DECL(t, a) t a
86319+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
86320 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
86321 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
86322 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
86323-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
86324+#define __SC_LONG(t, a) __typeof( \
86325+ __builtin_choose_expr( \
86326+ sizeof(t) > sizeof(int), \
86327+ (t) 0, \
86328+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
86329+ )) a
86330 #define __SC_CAST(t, a) (t) a
86331 #define __SC_ARGS(t, a) a
86332 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
86333@@ -383,11 +389,11 @@ asmlinkage long sys_sync(void);
86334 asmlinkage long sys_fsync(unsigned int fd);
86335 asmlinkage long sys_fdatasync(unsigned int fd);
86336 asmlinkage long sys_bdflush(int func, long data);
86337-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
86338- char __user *type, unsigned long flags,
86339+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
86340+ const char __user *type, unsigned long flags,
86341 void __user *data);
86342-asmlinkage long sys_umount(char __user *name, int flags);
86343-asmlinkage long sys_oldumount(char __user *name);
86344+asmlinkage long sys_umount(const char __user *name, int flags);
86345+asmlinkage long sys_oldumount(const char __user *name);
86346 asmlinkage long sys_truncate(const char __user *path, long length);
86347 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
86348 asmlinkage long sys_stat(const char __user *filename,
86349@@ -599,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
86350 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
86351 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
86352 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
86353- struct sockaddr __user *, int);
86354+ struct sockaddr __user *, int) __intentional_overflow(0);
86355 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
86356 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
86357 unsigned int vlen, unsigned flags);
86358diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
86359index 27b3b0b..e093dd9 100644
86360--- a/include/linux/syscore_ops.h
86361+++ b/include/linux/syscore_ops.h
86362@@ -16,7 +16,7 @@ struct syscore_ops {
86363 int (*suspend)(void);
86364 void (*resume)(void);
86365 void (*shutdown)(void);
86366-};
86367+} __do_const;
86368
86369 extern void register_syscore_ops(struct syscore_ops *ops);
86370 extern void unregister_syscore_ops(struct syscore_ops *ops);
86371diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
86372index b7361f8..341a15a 100644
86373--- a/include/linux/sysctl.h
86374+++ b/include/linux/sysctl.h
86375@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
86376
86377 extern int proc_dostring(struct ctl_table *, int,
86378 void __user *, size_t *, loff_t *);
86379+extern int proc_dostring_modpriv(struct ctl_table *, int,
86380+ void __user *, size_t *, loff_t *);
86381 extern int proc_dointvec(struct ctl_table *, int,
86382 void __user *, size_t *, loff_t *);
86383 extern int proc_dointvec_minmax(struct ctl_table *, int,
86384@@ -113,7 +115,8 @@ struct ctl_table
86385 struct ctl_table_poll *poll;
86386 void *extra1;
86387 void *extra2;
86388-};
86389+} __do_const __randomize_layout;
86390+typedef struct ctl_table __no_const ctl_table_no_const;
86391
86392 struct ctl_node {
86393 struct rb_node node;
86394diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
86395index f97d0db..c1187dc 100644
86396--- a/include/linux/sysfs.h
86397+++ b/include/linux/sysfs.h
86398@@ -34,7 +34,8 @@ struct attribute {
86399 struct lock_class_key *key;
86400 struct lock_class_key skey;
86401 #endif
86402-};
86403+} __do_const;
86404+typedef struct attribute __no_const attribute_no_const;
86405
86406 /**
86407 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
86408@@ -63,7 +64,8 @@ struct attribute_group {
86409 struct attribute *, int);
86410 struct attribute **attrs;
86411 struct bin_attribute **bin_attrs;
86412-};
86413+} __do_const;
86414+typedef struct attribute_group __no_const attribute_group_no_const;
86415
86416 /**
86417 * Use these macros to make defining attributes easier. See include/linux/device.h
86418@@ -128,7 +130,8 @@ struct bin_attribute {
86419 char *, loff_t, size_t);
86420 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
86421 struct vm_area_struct *vma);
86422-};
86423+} __do_const;
86424+typedef struct bin_attribute __no_const bin_attribute_no_const;
86425
86426 /**
86427 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
86428diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
86429index 387fa7d..3fcde6b 100644
86430--- a/include/linux/sysrq.h
86431+++ b/include/linux/sysrq.h
86432@@ -16,6 +16,7 @@
86433
86434 #include <linux/errno.h>
86435 #include <linux/types.h>
86436+#include <linux/compiler.h>
86437
86438 /* Possible values of bitmask for enabling sysrq functions */
86439 /* 0x0001 is reserved for enable everything */
86440@@ -33,7 +34,7 @@ struct sysrq_key_op {
86441 char *help_msg;
86442 char *action_msg;
86443 int enable_mask;
86444-};
86445+} __do_const;
86446
86447 #ifdef CONFIG_MAGIC_SYSRQ
86448
86449diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
86450index ff307b5..f1a4468 100644
86451--- a/include/linux/thread_info.h
86452+++ b/include/linux/thread_info.h
86453@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
86454 #error "no set_restore_sigmask() provided and default one won't work"
86455 #endif
86456
86457+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
86458+
86459+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
86460+{
86461+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
86462+}
86463+
86464 #endif /* __KERNEL__ */
86465
86466 #endif /* _LINUX_THREAD_INFO_H */
86467diff --git a/include/linux/tty.h b/include/linux/tty.h
86468index 8413294..44391c7 100644
86469--- a/include/linux/tty.h
86470+++ b/include/linux/tty.h
86471@@ -202,7 +202,7 @@ struct tty_port {
86472 const struct tty_port_operations *ops; /* Port operations */
86473 spinlock_t lock; /* Lock protecting tty field */
86474 int blocked_open; /* Waiting to open */
86475- int count; /* Usage count */
86476+ atomic_t count; /* Usage count */
86477 wait_queue_head_t open_wait; /* Open waiters */
86478 wait_queue_head_t close_wait; /* Close waiters */
86479 wait_queue_head_t delta_msr_wait; /* Modem status change */
86480@@ -284,7 +284,7 @@ struct tty_struct {
86481 /* If the tty has a pending do_SAK, queue it here - akpm */
86482 struct work_struct SAK_work;
86483 struct tty_port *port;
86484-};
86485+} __randomize_layout;
86486
86487 /* Each of a tty's open files has private_data pointing to tty_file_private */
86488 struct tty_file_private {
86489@@ -548,7 +548,7 @@ extern int tty_port_open(struct tty_port *port,
86490 struct tty_struct *tty, struct file *filp);
86491 static inline int tty_port_users(struct tty_port *port)
86492 {
86493- return port->count + port->blocked_open;
86494+ return atomic_read(&port->count) + port->blocked_open;
86495 }
86496
86497 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
86498diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
86499index e48c608..6a19af2 100644
86500--- a/include/linux/tty_driver.h
86501+++ b/include/linux/tty_driver.h
86502@@ -287,7 +287,7 @@ struct tty_operations {
86503 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
86504 #endif
86505 const struct file_operations *proc_fops;
86506-};
86507+} __do_const __randomize_layout;
86508
86509 struct tty_driver {
86510 int magic; /* magic number for this structure */
86511@@ -321,7 +321,7 @@ struct tty_driver {
86512
86513 const struct tty_operations *ops;
86514 struct list_head tty_drivers;
86515-};
86516+} __randomize_layout;
86517
86518 extern struct list_head tty_drivers;
86519
86520diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
86521index 00c9d68..bc0188b 100644
86522--- a/include/linux/tty_ldisc.h
86523+++ b/include/linux/tty_ldisc.h
86524@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
86525
86526 struct module *owner;
86527
86528- int refcount;
86529+ atomic_t refcount;
86530 };
86531
86532 struct tty_ldisc {
86533diff --git a/include/linux/types.h b/include/linux/types.h
86534index a0bb704..f511c77 100644
86535--- a/include/linux/types.h
86536+++ b/include/linux/types.h
86537@@ -177,10 +177,26 @@ typedef struct {
86538 int counter;
86539 } atomic_t;
86540
86541+#ifdef CONFIG_PAX_REFCOUNT
86542+typedef struct {
86543+ int counter;
86544+} atomic_unchecked_t;
86545+#else
86546+typedef atomic_t atomic_unchecked_t;
86547+#endif
86548+
86549 #ifdef CONFIG_64BIT
86550 typedef struct {
86551 long counter;
86552 } atomic64_t;
86553+
86554+#ifdef CONFIG_PAX_REFCOUNT
86555+typedef struct {
86556+ long counter;
86557+} atomic64_unchecked_t;
86558+#else
86559+typedef atomic64_t atomic64_unchecked_t;
86560+#endif
86561 #endif
86562
86563 struct list_head {
86564diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
86565index ecd3319..8a36ded 100644
86566--- a/include/linux/uaccess.h
86567+++ b/include/linux/uaccess.h
86568@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
86569 long ret; \
86570 mm_segment_t old_fs = get_fs(); \
86571 \
86572- set_fs(KERNEL_DS); \
86573 pagefault_disable(); \
86574- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
86575- pagefault_enable(); \
86576+ set_fs(KERNEL_DS); \
86577+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
86578 set_fs(old_fs); \
86579+ pagefault_enable(); \
86580 ret; \
86581 })
86582
86583diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
86584index 2d1f9b6..d7a9fce 100644
86585--- a/include/linux/uidgid.h
86586+++ b/include/linux/uidgid.h
86587@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
86588
86589 #endif /* CONFIG_USER_NS */
86590
86591+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
86592+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
86593+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
86594+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
86595+
86596 #endif /* _LINUX_UIDGID_H */
86597diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
86598index 99c1b4d..562e6f3 100644
86599--- a/include/linux/unaligned/access_ok.h
86600+++ b/include/linux/unaligned/access_ok.h
86601@@ -4,34 +4,34 @@
86602 #include <linux/kernel.h>
86603 #include <asm/byteorder.h>
86604
86605-static inline u16 get_unaligned_le16(const void *p)
86606+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
86607 {
86608- return le16_to_cpup((__le16 *)p);
86609+ return le16_to_cpup((const __le16 *)p);
86610 }
86611
86612-static inline u32 get_unaligned_le32(const void *p)
86613+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
86614 {
86615- return le32_to_cpup((__le32 *)p);
86616+ return le32_to_cpup((const __le32 *)p);
86617 }
86618
86619-static inline u64 get_unaligned_le64(const void *p)
86620+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
86621 {
86622- return le64_to_cpup((__le64 *)p);
86623+ return le64_to_cpup((const __le64 *)p);
86624 }
86625
86626-static inline u16 get_unaligned_be16(const void *p)
86627+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
86628 {
86629- return be16_to_cpup((__be16 *)p);
86630+ return be16_to_cpup((const __be16 *)p);
86631 }
86632
86633-static inline u32 get_unaligned_be32(const void *p)
86634+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
86635 {
86636- return be32_to_cpup((__be32 *)p);
86637+ return be32_to_cpup((const __be32 *)p);
86638 }
86639
86640-static inline u64 get_unaligned_be64(const void *p)
86641+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
86642 {
86643- return be64_to_cpup((__be64 *)p);
86644+ return be64_to_cpup((const __be64 *)p);
86645 }
86646
86647 static inline void put_unaligned_le16(u16 val, void *p)
86648diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
86649index 4f844c6..60beb5d 100644
86650--- a/include/linux/uprobes.h
86651+++ b/include/linux/uprobes.h
86652@@ -98,11 +98,11 @@ struct uprobes_state {
86653 struct xol_area *xol_area;
86654 };
86655
86656-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86657-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86658-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
86659-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
86660-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
86661+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86662+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
86663+extern bool is_swbp_insn(uprobe_opcode_t *insn);
86664+extern bool is_trap_insn(uprobe_opcode_t *insn);
86665+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
86666 extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
86667 extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
86668 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
86669@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
86670 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
86671 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
86672 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
86673-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
86674-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
86675+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
86676+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
86677 void *src, unsigned long len);
86678 #else /* !CONFIG_UPROBES */
86679 struct uprobes_state {
86680diff --git a/include/linux/usb.h b/include/linux/usb.h
86681index d2465bc..5256de4 100644
86682--- a/include/linux/usb.h
86683+++ b/include/linux/usb.h
86684@@ -571,7 +571,7 @@ struct usb_device {
86685 int maxchild;
86686
86687 u32 quirks;
86688- atomic_t urbnum;
86689+ atomic_unchecked_t urbnum;
86690
86691 unsigned long active_duration;
86692
86693@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
86694
86695 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
86696 __u8 request, __u8 requesttype, __u16 value, __u16 index,
86697- void *data, __u16 size, int timeout);
86698+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
86699 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
86700 void *data, int len, int *actual_length, int timeout);
86701 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
86702diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
86703index d5952bb..9a626d4 100644
86704--- a/include/linux/usb/renesas_usbhs.h
86705+++ b/include/linux/usb/renesas_usbhs.h
86706@@ -39,7 +39,7 @@ enum {
86707 */
86708 struct renesas_usbhs_driver_callback {
86709 int (*notify_hotplug)(struct platform_device *pdev);
86710-};
86711+} __no_const;
86712
86713 /*
86714 * callback functions for platform
86715diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
86716index e953726..8edb26a 100644
86717--- a/include/linux/user_namespace.h
86718+++ b/include/linux/user_namespace.h
86719@@ -33,7 +33,7 @@ struct user_namespace {
86720 struct key *persistent_keyring_register;
86721 struct rw_semaphore persistent_keyring_register_sem;
86722 #endif
86723-};
86724+} __randomize_layout;
86725
86726 extern struct user_namespace init_user_ns;
86727
86728diff --git a/include/linux/utsname.h b/include/linux/utsname.h
86729index 239e277..22a5cf5 100644
86730--- a/include/linux/utsname.h
86731+++ b/include/linux/utsname.h
86732@@ -24,7 +24,7 @@ struct uts_namespace {
86733 struct new_utsname name;
86734 struct user_namespace *user_ns;
86735 unsigned int proc_inum;
86736-};
86737+} __randomize_layout;
86738 extern struct uts_namespace init_uts_ns;
86739
86740 #ifdef CONFIG_UTS_NS
86741diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
86742index 6f8fbcf..4efc177 100644
86743--- a/include/linux/vermagic.h
86744+++ b/include/linux/vermagic.h
86745@@ -25,9 +25,42 @@
86746 #define MODULE_ARCH_VERMAGIC ""
86747 #endif
86748
86749+#ifdef CONFIG_PAX_REFCOUNT
86750+#define MODULE_PAX_REFCOUNT "REFCOUNT "
86751+#else
86752+#define MODULE_PAX_REFCOUNT ""
86753+#endif
86754+
86755+#ifdef CONSTIFY_PLUGIN
86756+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
86757+#else
86758+#define MODULE_CONSTIFY_PLUGIN ""
86759+#endif
86760+
86761+#ifdef STACKLEAK_PLUGIN
86762+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86763+#else
86764+#define MODULE_STACKLEAK_PLUGIN ""
86765+#endif
86766+
86767+#ifdef RANDSTRUCT_PLUGIN
86768+#include <generated/randomize_layout_hash.h>
86769+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86770+#else
86771+#define MODULE_RANDSTRUCT_PLUGIN
86772+#endif
86773+
86774+#ifdef CONFIG_GRKERNSEC
86775+#define MODULE_GRSEC "GRSEC "
86776+#else
86777+#define MODULE_GRSEC ""
86778+#endif
86779+
86780 #define VERMAGIC_STRING \
86781 UTS_RELEASE " " \
86782 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86783 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86784- MODULE_ARCH_VERMAGIC
86785+ MODULE_ARCH_VERMAGIC \
86786+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86787+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86788
86789diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86790index b483abd..af305ad 100644
86791--- a/include/linux/vga_switcheroo.h
86792+++ b/include/linux/vga_switcheroo.h
86793@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86794
86795 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86796
86797-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86798+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86799 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86800-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86801+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86802 #else
86803
86804 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86805@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86806
86807 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86808
86809-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86810+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86811 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86812-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86813+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86814
86815 #endif
86816 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86817diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86818index b87696f..1d11de7 100644
86819--- a/include/linux/vmalloc.h
86820+++ b/include/linux/vmalloc.h
86821@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86822 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86823 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86824 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86825+
86826+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86827+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86828+#endif
86829+
86830 /* bits [20..32] reserved for arch specific ioremap internals */
86831
86832 /*
86833@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86834 unsigned long flags, pgprot_t prot);
86835 extern void vunmap(const void *addr);
86836
86837+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86838+extern void unmap_process_stacks(struct task_struct *task);
86839+#endif
86840+
86841 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86842 unsigned long uaddr, void *kaddr,
86843 unsigned long size);
86844@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86845
86846 /* for /dev/kmem */
86847 extern long vread(char *buf, char *addr, unsigned long count);
86848-extern long vwrite(char *buf, char *addr, unsigned long count);
86849+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86850
86851 /*
86852 * Internals. Dont't use..
86853diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86854index 82e7db7..f8ce3d0 100644
86855--- a/include/linux/vmstat.h
86856+++ b/include/linux/vmstat.h
86857@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86858 /*
86859 * Zone based page accounting with per cpu differentials.
86860 */
86861-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86862+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86863
86864 static inline void zone_page_state_add(long x, struct zone *zone,
86865 enum zone_stat_item item)
86866 {
86867- atomic_long_add(x, &zone->vm_stat[item]);
86868- atomic_long_add(x, &vm_stat[item]);
86869+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86870+ atomic_long_add_unchecked(x, &vm_stat[item]);
86871 }
86872
86873-static inline unsigned long global_page_state(enum zone_stat_item item)
86874+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86875 {
86876- long x = atomic_long_read(&vm_stat[item]);
86877+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86878 #ifdef CONFIG_SMP
86879 if (x < 0)
86880 x = 0;
86881@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86882 return x;
86883 }
86884
86885-static inline unsigned long zone_page_state(struct zone *zone,
86886+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86887 enum zone_stat_item item)
86888 {
86889- long x = atomic_long_read(&zone->vm_stat[item]);
86890+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86891 #ifdef CONFIG_SMP
86892 if (x < 0)
86893 x = 0;
86894@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86895 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86896 enum zone_stat_item item)
86897 {
86898- long x = atomic_long_read(&zone->vm_stat[item]);
86899+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86900
86901 #ifdef CONFIG_SMP
86902 int cpu;
86903@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86904
86905 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86906 {
86907- atomic_long_inc(&zone->vm_stat[item]);
86908- atomic_long_inc(&vm_stat[item]);
86909+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86910+ atomic_long_inc_unchecked(&vm_stat[item]);
86911 }
86912
86913 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86914 {
86915- atomic_long_dec(&zone->vm_stat[item]);
86916- atomic_long_dec(&vm_stat[item]);
86917+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86918+ atomic_long_dec_unchecked(&vm_stat[item]);
86919 }
86920
86921 static inline void __inc_zone_page_state(struct page *page,
86922diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86923index 91b0a68..0e9adf6 100644
86924--- a/include/linux/xattr.h
86925+++ b/include/linux/xattr.h
86926@@ -28,7 +28,7 @@ struct xattr_handler {
86927 size_t size, int handler_flags);
86928 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86929 size_t size, int flags, int handler_flags);
86930-};
86931+} __do_const;
86932
86933 struct xattr {
86934 const char *name;
86935@@ -37,6 +37,9 @@ struct xattr {
86936 };
86937
86938 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86939+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86940+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86941+#endif
86942 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86943 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86944 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86945diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86946index 92dbbd3..13ab0b3 100644
86947--- a/include/linux/zlib.h
86948+++ b/include/linux/zlib.h
86949@@ -31,6 +31,7 @@
86950 #define _ZLIB_H
86951
86952 #include <linux/zconf.h>
86953+#include <linux/compiler.h>
86954
86955 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86956 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86957@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86958
86959 /* basic functions */
86960
86961-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86962+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86963 /*
86964 Returns the number of bytes that needs to be allocated for a per-
86965 stream workspace with the specified parameters. A pointer to this
86966diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86967index eb76cfd..9fd0e7c 100644
86968--- a/include/media/v4l2-dev.h
86969+++ b/include/media/v4l2-dev.h
86970@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86971 int (*mmap) (struct file *, struct vm_area_struct *);
86972 int (*open) (struct file *);
86973 int (*release) (struct file *);
86974-};
86975+} __do_const;
86976
86977 /*
86978 * Newer version of video_device, handled by videodev2.c
86979diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86980index ffb69da..040393e 100644
86981--- a/include/media/v4l2-device.h
86982+++ b/include/media/v4l2-device.h
86983@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86984 this function returns 0. If the name ends with a digit (e.g. cx18),
86985 then the name will be set to cx18-0 since cx180 looks really odd. */
86986 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86987- atomic_t *instance);
86988+ atomic_unchecked_t *instance);
86989
86990 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86991 Since the parent disappears this ensures that v4l2_dev doesn't have an
86992diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86993index d9fa68f..45c88d1 100644
86994--- a/include/net/9p/transport.h
86995+++ b/include/net/9p/transport.h
86996@@ -63,7 +63,7 @@ struct p9_trans_module {
86997 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86998 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86999 char *, char *, int , int, int, int);
87000-};
87001+} __do_const;
87002
87003 void v9fs_register_trans(struct p9_trans_module *m);
87004 void v9fs_unregister_trans(struct p9_trans_module *m);
87005diff --git a/include/net/af_unix.h b/include/net/af_unix.h
87006index a175ba4..196eb82 100644
87007--- a/include/net/af_unix.h
87008+++ b/include/net/af_unix.h
87009@@ -36,7 +36,7 @@ struct unix_skb_parms {
87010 u32 secid; /* Security ID */
87011 #endif
87012 u32 consumed;
87013-};
87014+} __randomize_layout;
87015
87016 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
87017 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
87018diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
87019index 8df15ad..837fbedd 100644
87020--- a/include/net/bluetooth/l2cap.h
87021+++ b/include/net/bluetooth/l2cap.h
87022@@ -608,7 +608,7 @@ struct l2cap_ops {
87023 unsigned char *kdata,
87024 struct iovec *iov,
87025 int len);
87026-};
87027+} __do_const;
87028
87029 struct l2cap_conn {
87030 struct hci_conn *hcon;
87031diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
87032index f2ae33d..c457cf0 100644
87033--- a/include/net/caif/cfctrl.h
87034+++ b/include/net/caif/cfctrl.h
87035@@ -52,7 +52,7 @@ struct cfctrl_rsp {
87036 void (*radioset_rsp)(void);
87037 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
87038 struct cflayer *client_layer);
87039-};
87040+} __no_const;
87041
87042 /* Link Setup Parameters for CAIF-Links. */
87043 struct cfctrl_link_param {
87044@@ -101,8 +101,8 @@ struct cfctrl_request_info {
87045 struct cfctrl {
87046 struct cfsrvl serv;
87047 struct cfctrl_rsp res;
87048- atomic_t req_seq_no;
87049- atomic_t rsp_seq_no;
87050+ atomic_unchecked_t req_seq_no;
87051+ atomic_unchecked_t rsp_seq_no;
87052 struct list_head list;
87053 /* Protects from simultaneous access to first_req list */
87054 spinlock_t info_list_lock;
87055diff --git a/include/net/flow.h b/include/net/flow.h
87056index 8109a15..504466d 100644
87057--- a/include/net/flow.h
87058+++ b/include/net/flow.h
87059@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
87060
87061 void flow_cache_flush(struct net *net);
87062 void flow_cache_flush_deferred(struct net *net);
87063-extern atomic_t flow_cache_genid;
87064+extern atomic_unchecked_t flow_cache_genid;
87065
87066 #endif
87067diff --git a/include/net/genetlink.h b/include/net/genetlink.h
87068index af10c2c..a431cc5 100644
87069--- a/include/net/genetlink.h
87070+++ b/include/net/genetlink.h
87071@@ -120,7 +120,7 @@ struct genl_ops {
87072 u8 cmd;
87073 u8 internal_flags;
87074 u8 flags;
87075-};
87076+} __do_const;
87077
87078 int __genl_register_family(struct genl_family *family);
87079
87080diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
87081index 734d9b5..48a9a4b 100644
87082--- a/include/net/gro_cells.h
87083+++ b/include/net/gro_cells.h
87084@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
87085 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
87086
87087 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
87088- atomic_long_inc(&dev->rx_dropped);
87089+ atomic_long_inc_unchecked(&dev->rx_dropped);
87090 kfree_skb(skb);
87091 return;
87092 }
87093diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
87094index 5fbe656..9ed3d8b 100644
87095--- a/include/net/inet_connection_sock.h
87096+++ b/include/net/inet_connection_sock.h
87097@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
87098 int (*bind_conflict)(const struct sock *sk,
87099 const struct inet_bind_bucket *tb, bool relax);
87100 void (*mtu_reduced)(struct sock *sk);
87101-};
87102+} __do_const;
87103
87104 /** inet_connection_sock - INET connection oriented sock
87105 *
87106diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
87107index 01d590e..f69c61d 100644
87108--- a/include/net/inetpeer.h
87109+++ b/include/net/inetpeer.h
87110@@ -47,7 +47,7 @@ struct inet_peer {
87111 */
87112 union {
87113 struct {
87114- atomic_t rid; /* Frag reception counter */
87115+ atomic_unchecked_t rid; /* Frag reception counter */
87116 };
87117 struct rcu_head rcu;
87118 struct inet_peer *gc_next;
87119diff --git a/include/net/ip.h b/include/net/ip.h
87120index db4a771..965a42a 100644
87121--- a/include/net/ip.h
87122+++ b/include/net/ip.h
87123@@ -316,7 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
87124 }
87125 }
87126
87127-u32 ip_idents_reserve(u32 hash, int segs);
87128+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
87129 void __ip_select_ident(struct iphdr *iph, int segs);
87130
87131 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
87132diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
87133index 9922093..a1755d6 100644
87134--- a/include/net/ip_fib.h
87135+++ b/include/net/ip_fib.h
87136@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
87137
87138 #define FIB_RES_SADDR(net, res) \
87139 ((FIB_RES_NH(res).nh_saddr_genid == \
87140- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
87141+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
87142 FIB_RES_NH(res).nh_saddr : \
87143 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
87144 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
87145diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
87146index 624a8a5..b1e2a24 100644
87147--- a/include/net/ip_vs.h
87148+++ b/include/net/ip_vs.h
87149@@ -558,7 +558,7 @@ struct ip_vs_conn {
87150 struct ip_vs_conn *control; /* Master control connection */
87151 atomic_t n_control; /* Number of controlled ones */
87152 struct ip_vs_dest *dest; /* real server */
87153- atomic_t in_pkts; /* incoming packet counter */
87154+ atomic_unchecked_t in_pkts; /* incoming packet counter */
87155
87156 /* packet transmitter for different forwarding methods. If it
87157 mangles the packet, it must return NF_DROP or better NF_STOLEN,
87158@@ -705,7 +705,7 @@ struct ip_vs_dest {
87159 __be16 port; /* port number of the server */
87160 union nf_inet_addr addr; /* IP address of the server */
87161 volatile unsigned int flags; /* dest status flags */
87162- atomic_t conn_flags; /* flags to copy to conn */
87163+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
87164 atomic_t weight; /* server weight */
87165
87166 atomic_t refcnt; /* reference counter */
87167@@ -960,11 +960,11 @@ struct netns_ipvs {
87168 /* ip_vs_lblc */
87169 int sysctl_lblc_expiration;
87170 struct ctl_table_header *lblc_ctl_header;
87171- struct ctl_table *lblc_ctl_table;
87172+ ctl_table_no_const *lblc_ctl_table;
87173 /* ip_vs_lblcr */
87174 int sysctl_lblcr_expiration;
87175 struct ctl_table_header *lblcr_ctl_header;
87176- struct ctl_table *lblcr_ctl_table;
87177+ ctl_table_no_const *lblcr_ctl_table;
87178 /* ip_vs_est */
87179 struct list_head est_list; /* estimator list */
87180 spinlock_t est_lock;
87181diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
87182index 8d4f588..2e37ad2 100644
87183--- a/include/net/irda/ircomm_tty.h
87184+++ b/include/net/irda/ircomm_tty.h
87185@@ -33,6 +33,7 @@
87186 #include <linux/termios.h>
87187 #include <linux/timer.h>
87188 #include <linux/tty.h> /* struct tty_struct */
87189+#include <asm/local.h>
87190
87191 #include <net/irda/irias_object.h>
87192 #include <net/irda/ircomm_core.h>
87193diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
87194index 714cc9a..ea05f3e 100644
87195--- a/include/net/iucv/af_iucv.h
87196+++ b/include/net/iucv/af_iucv.h
87197@@ -149,7 +149,7 @@ struct iucv_skb_cb {
87198 struct iucv_sock_list {
87199 struct hlist_head head;
87200 rwlock_t lock;
87201- atomic_t autobind_name;
87202+ atomic_unchecked_t autobind_name;
87203 };
87204
87205 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
87206diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
87207index f3be818..bf46196 100644
87208--- a/include/net/llc_c_ac.h
87209+++ b/include/net/llc_c_ac.h
87210@@ -87,7 +87,7 @@
87211 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
87212 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
87213
87214-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87215+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
87216
87217 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
87218 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
87219diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
87220index 3948cf1..83b28c4 100644
87221--- a/include/net/llc_c_ev.h
87222+++ b/include/net/llc_c_ev.h
87223@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
87224 return (struct llc_conn_state_ev *)skb->cb;
87225 }
87226
87227-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87228-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87229+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
87230+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
87231
87232 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
87233 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
87234diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
87235index 0e79cfb..f46db31 100644
87236--- a/include/net/llc_c_st.h
87237+++ b/include/net/llc_c_st.h
87238@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
87239 u8 next_state;
87240 llc_conn_ev_qfyr_t *ev_qualifiers;
87241 llc_conn_action_t *ev_actions;
87242-};
87243+} __do_const;
87244
87245 struct llc_conn_state {
87246 u8 current_state;
87247diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
87248index a61b98c..aade1eb 100644
87249--- a/include/net/llc_s_ac.h
87250+++ b/include/net/llc_s_ac.h
87251@@ -23,7 +23,7 @@
87252 #define SAP_ACT_TEST_IND 9
87253
87254 /* All action functions must look like this */
87255-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87256+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
87257
87258 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
87259 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
87260diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
87261index 567c681..cd73ac02 100644
87262--- a/include/net/llc_s_st.h
87263+++ b/include/net/llc_s_st.h
87264@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
87265 llc_sap_ev_t ev;
87266 u8 next_state;
87267 llc_sap_action_t *ev_actions;
87268-};
87269+} __do_const;
87270
87271 struct llc_sap_state {
87272 u8 curr_state;
87273diff --git a/include/net/mac80211.h b/include/net/mac80211.h
87274index dae2e24..89336e6 100644
87275--- a/include/net/mac80211.h
87276+++ b/include/net/mac80211.h
87277@@ -4650,7 +4650,7 @@ struct rate_control_ops {
87278 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
87279
87280 u32 (*get_expected_throughput)(void *priv_sta);
87281-};
87282+} __do_const;
87283
87284 static inline int rate_supported(struct ieee80211_sta *sta,
87285 enum ieee80211_band band,
87286diff --git a/include/net/neighbour.h b/include/net/neighbour.h
87287index 47f4254..fd095bc 100644
87288--- a/include/net/neighbour.h
87289+++ b/include/net/neighbour.h
87290@@ -163,7 +163,7 @@ struct neigh_ops {
87291 void (*error_report)(struct neighbour *, struct sk_buff *);
87292 int (*output)(struct neighbour *, struct sk_buff *);
87293 int (*connected_output)(struct neighbour *, struct sk_buff *);
87294-};
87295+} __do_const;
87296
87297 struct pneigh_entry {
87298 struct pneigh_entry *next;
87299@@ -217,7 +217,7 @@ struct neigh_table {
87300 struct neigh_statistics __percpu *stats;
87301 struct neigh_hash_table __rcu *nht;
87302 struct pneigh_entry **phash_buckets;
87303-};
87304+} __randomize_layout;
87305
87306 static inline int neigh_parms_family(struct neigh_parms *p)
87307 {
87308diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
87309index e0d6466..e2f3003 100644
87310--- a/include/net/net_namespace.h
87311+++ b/include/net/net_namespace.h
87312@@ -129,8 +129,8 @@ struct net {
87313 struct netns_ipvs *ipvs;
87314 #endif
87315 struct sock *diag_nlsk;
87316- atomic_t fnhe_genid;
87317-};
87318+ atomic_unchecked_t fnhe_genid;
87319+} __randomize_layout;
87320
87321 #include <linux/seq_file_net.h>
87322
87323@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
87324 #define __net_init __init
87325 #define __net_exit __exit_refok
87326 #define __net_initdata __initdata
87327+#ifdef CONSTIFY_PLUGIN
87328 #define __net_initconst __initconst
87329+#else
87330+#define __net_initconst __initdata
87331+#endif
87332 #endif
87333
87334 struct pernet_operations {
87335@@ -296,7 +300,7 @@ struct pernet_operations {
87336 void (*exit_batch)(struct list_head *net_exit_list);
87337 int *id;
87338 size_t size;
87339-};
87340+} __do_const;
87341
87342 /*
87343 * Use these carefully. If you implement a network device and it
87344@@ -344,12 +348,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
87345
87346 static inline int rt_genid_ipv4(struct net *net)
87347 {
87348- return atomic_read(&net->ipv4.rt_genid);
87349+ return atomic_read_unchecked(&net->ipv4.rt_genid);
87350 }
87351
87352 static inline void rt_genid_bump_ipv4(struct net *net)
87353 {
87354- atomic_inc(&net->ipv4.rt_genid);
87355+ atomic_inc_unchecked(&net->ipv4.rt_genid);
87356 }
87357
87358 extern void (*__fib6_flush_trees)(struct net *net);
87359@@ -376,12 +380,12 @@ static inline void rt_genid_bump_all(struct net *net)
87360
87361 static inline int fnhe_genid(struct net *net)
87362 {
87363- return atomic_read(&net->fnhe_genid);
87364+ return atomic_read_unchecked(&net->fnhe_genid);
87365 }
87366
87367 static inline void fnhe_genid_bump(struct net *net)
87368 {
87369- atomic_inc(&net->fnhe_genid);
87370+ atomic_inc_unchecked(&net->fnhe_genid);
87371 }
87372
87373 #endif /* __NET_NET_NAMESPACE_H */
87374diff --git a/include/net/netdma.h b/include/net/netdma.h
87375index 8ba8ce2..99b7fff 100644
87376--- a/include/net/netdma.h
87377+++ b/include/net/netdma.h
87378@@ -24,7 +24,7 @@
87379 #include <linux/dmaengine.h>
87380 #include <linux/skbuff.h>
87381
87382-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87383+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
87384 struct sk_buff *skb, int offset, struct iovec *to,
87385 size_t len, struct dma_pinned_list *pinned_list);
87386
87387diff --git a/include/net/netlink.h b/include/net/netlink.h
87388index 6c10762..3e5de0c 100644
87389--- a/include/net/netlink.h
87390+++ b/include/net/netlink.h
87391@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
87392 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
87393 {
87394 if (mark)
87395- skb_trim(skb, (unsigned char *) mark - skb->data);
87396+ skb_trim(skb, (const unsigned char *) mark - skb->data);
87397 }
87398
87399 /**
87400diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
87401index 29d6a94..235d3d8 100644
87402--- a/include/net/netns/conntrack.h
87403+++ b/include/net/netns/conntrack.h
87404@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
87405 struct nf_proto_net {
87406 #ifdef CONFIG_SYSCTL
87407 struct ctl_table_header *ctl_table_header;
87408- struct ctl_table *ctl_table;
87409+ ctl_table_no_const *ctl_table;
87410 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
87411 struct ctl_table_header *ctl_compat_header;
87412- struct ctl_table *ctl_compat_table;
87413+ ctl_table_no_const *ctl_compat_table;
87414 #endif
87415 #endif
87416 unsigned int users;
87417@@ -60,7 +60,7 @@ struct nf_ip_net {
87418 struct nf_icmp_net icmpv6;
87419 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
87420 struct ctl_table_header *ctl_table_header;
87421- struct ctl_table *ctl_table;
87422+ ctl_table_no_const *ctl_table;
87423 #endif
87424 };
87425
87426diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
87427index aec5e12..807233f 100644
87428--- a/include/net/netns/ipv4.h
87429+++ b/include/net/netns/ipv4.h
87430@@ -82,7 +82,7 @@ struct netns_ipv4 {
87431
87432 struct ping_group_range ping_group_range;
87433
87434- atomic_t dev_addr_genid;
87435+ atomic_unchecked_t dev_addr_genid;
87436
87437 #ifdef CONFIG_SYSCTL
87438 unsigned long *sysctl_local_reserved_ports;
87439@@ -96,6 +96,6 @@ struct netns_ipv4 {
87440 struct fib_rules_ops *mr_rules_ops;
87441 #endif
87442 #endif
87443- atomic_t rt_genid;
87444+ atomic_unchecked_t rt_genid;
87445 };
87446 #endif
87447diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
87448index eade27a..42894dd 100644
87449--- a/include/net/netns/ipv6.h
87450+++ b/include/net/netns/ipv6.h
87451@@ -75,8 +75,8 @@ struct netns_ipv6 {
87452 struct fib_rules_ops *mr6_rules_ops;
87453 #endif
87454 #endif
87455- atomic_t dev_addr_genid;
87456- atomic_t rt_genid;
87457+ atomic_unchecked_t dev_addr_genid;
87458+ atomic_unchecked_t rt_genid;
87459 };
87460
87461 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
87462diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
87463index 3492434..209f58c 100644
87464--- a/include/net/netns/xfrm.h
87465+++ b/include/net/netns/xfrm.h
87466@@ -64,7 +64,7 @@ struct netns_xfrm {
87467
87468 /* flow cache part */
87469 struct flow_cache flow_cache_global;
87470- atomic_t flow_cache_genid;
87471+ atomic_unchecked_t flow_cache_genid;
87472 struct list_head flow_cache_gc_list;
87473 spinlock_t flow_cache_gc_lock;
87474 struct work_struct flow_cache_gc_work;
87475diff --git a/include/net/ping.h b/include/net/ping.h
87476index 026479b..d9b2829 100644
87477--- a/include/net/ping.h
87478+++ b/include/net/ping.h
87479@@ -54,7 +54,7 @@ struct ping_iter_state {
87480
87481 extern struct proto ping_prot;
87482 #if IS_ENABLED(CONFIG_IPV6)
87483-extern struct pingv6_ops pingv6_ops;
87484+extern struct pingv6_ops *pingv6_ops;
87485 #endif
87486
87487 struct pingfakehdr {
87488diff --git a/include/net/protocol.h b/include/net/protocol.h
87489index d6fcc1f..ca277058 100644
87490--- a/include/net/protocol.h
87491+++ b/include/net/protocol.h
87492@@ -49,7 +49,7 @@ struct net_protocol {
87493 * socket lookup?
87494 */
87495 icmp_strict_tag_validation:1;
87496-};
87497+} __do_const;
87498
87499 #if IS_ENABLED(CONFIG_IPV6)
87500 struct inet6_protocol {
87501@@ -62,7 +62,7 @@ struct inet6_protocol {
87502 u8 type, u8 code, int offset,
87503 __be32 info);
87504 unsigned int flags; /* INET6_PROTO_xxx */
87505-};
87506+} __do_const;
87507
87508 #define INET6_PROTO_NOPOLICY 0x1
87509 #define INET6_PROTO_FINAL 0x2
87510diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
87511index e21b9f9..0191ef0 100644
87512--- a/include/net/rtnetlink.h
87513+++ b/include/net/rtnetlink.h
87514@@ -93,7 +93,7 @@ struct rtnl_link_ops {
87515 int (*fill_slave_info)(struct sk_buff *skb,
87516 const struct net_device *dev,
87517 const struct net_device *slave_dev);
87518-};
87519+} __do_const;
87520
87521 int __rtnl_link_register(struct rtnl_link_ops *ops);
87522 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
87523diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
87524index 4a5b9a3..ca27d73 100644
87525--- a/include/net/sctp/checksum.h
87526+++ b/include/net/sctp/checksum.h
87527@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
87528 unsigned int offset)
87529 {
87530 struct sctphdr *sh = sctp_hdr(skb);
87531- __le32 ret, old = sh->checksum;
87532- const struct skb_checksum_ops ops = {
87533+ __le32 ret, old = sh->checksum;
87534+ static const struct skb_checksum_ops ops = {
87535 .update = sctp_csum_update,
87536 .combine = sctp_csum_combine,
87537 };
87538diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
87539index 7f4eeb3..37e8fe1 100644
87540--- a/include/net/sctp/sm.h
87541+++ b/include/net/sctp/sm.h
87542@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
87543 typedef struct {
87544 sctp_state_fn_t *fn;
87545 const char *name;
87546-} sctp_sm_table_entry_t;
87547+} __do_const sctp_sm_table_entry_t;
87548
87549 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
87550 * currently in use.
87551@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
87552 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
87553
87554 /* Extern declarations for major data structures. */
87555-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87556+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
87557
87558
87559 /* Get the size of a DATA chunk payload. */
87560diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
87561index 4ff3f67..89ae38e 100644
87562--- a/include/net/sctp/structs.h
87563+++ b/include/net/sctp/structs.h
87564@@ -509,7 +509,7 @@ struct sctp_pf {
87565 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
87566 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
87567 struct sctp_af *af;
87568-};
87569+} __do_const;
87570
87571
87572 /* Structure to track chunk fragments that have been acked, but peer
87573diff --git a/include/net/sock.h b/include/net/sock.h
87574index b9a5bd0..dcd5f3c 100644
87575--- a/include/net/sock.h
87576+++ b/include/net/sock.h
87577@@ -356,7 +356,7 @@ struct sock {
87578 unsigned int sk_napi_id;
87579 unsigned int sk_ll_usec;
87580 #endif
87581- atomic_t sk_drops;
87582+ atomic_unchecked_t sk_drops;
87583 int sk_rcvbuf;
87584
87585 struct sk_filter __rcu *sk_filter;
87586@@ -1053,7 +1053,7 @@ struct proto {
87587 void (*destroy_cgroup)(struct mem_cgroup *memcg);
87588 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
87589 #endif
87590-};
87591+} __randomize_layout;
87592
87593 /*
87594 * Bits in struct cg_proto.flags
87595@@ -1240,7 +1240,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
87596 return ret >> PAGE_SHIFT;
87597 }
87598
87599-static inline long
87600+static inline long __intentional_overflow(-1)
87601 sk_memory_allocated(const struct sock *sk)
87602 {
87603 struct proto *prot = sk->sk_prot;
87604@@ -1385,7 +1385,7 @@ struct sock_iocb {
87605 struct scm_cookie *scm;
87606 struct msghdr *msg, async_msg;
87607 struct kiocb *kiocb;
87608-};
87609+} __randomize_layout;
87610
87611 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
87612 {
87613@@ -1820,7 +1820,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
87614 }
87615
87616 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
87617- char __user *from, char *to,
87618+ char __user *from, unsigned char *to,
87619 int copy, int offset)
87620 {
87621 if (skb->ip_summed == CHECKSUM_NONE) {
87622@@ -2091,7 +2091,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
87623 }
87624 }
87625
87626-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87627+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
87628
87629 /**
87630 * sk_page_frag - return an appropriate page_frag
87631diff --git a/include/net/tcp.h b/include/net/tcp.h
87632index 590e01a..76498f3 100644
87633--- a/include/net/tcp.h
87634+++ b/include/net/tcp.h
87635@@ -523,7 +523,7 @@ void tcp_retransmit_timer(struct sock *sk);
87636 void tcp_xmit_retransmit_queue(struct sock *);
87637 void tcp_simple_retransmit(struct sock *);
87638 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
87639-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87640+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87641
87642 void tcp_send_probe0(struct sock *);
87643 void tcp_send_partial(struct sock *);
87644@@ -696,8 +696,8 @@ struct tcp_skb_cb {
87645 struct inet6_skb_parm h6;
87646 #endif
87647 } header; /* For incoming frames */
87648- __u32 seq; /* Starting sequence number */
87649- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
87650+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
87651+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
87652 __u32 when; /* used to compute rtt's */
87653 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
87654
87655@@ -713,7 +713,7 @@ struct tcp_skb_cb {
87656
87657 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
87658 /* 1 byte hole */
87659- __u32 ack_seq; /* Sequence number ACK'd */
87660+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
87661 };
87662
87663 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
87664diff --git a/include/net/xfrm.h b/include/net/xfrm.h
87665index 721e9c3b..3c81bbf 100644
87666--- a/include/net/xfrm.h
87667+++ b/include/net/xfrm.h
87668@@ -285,7 +285,6 @@ struct xfrm_dst;
87669 struct xfrm_policy_afinfo {
87670 unsigned short family;
87671 struct dst_ops *dst_ops;
87672- void (*garbage_collect)(struct net *net);
87673 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
87674 const xfrm_address_t *saddr,
87675 const xfrm_address_t *daddr);
87676@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
87677 struct net_device *dev,
87678 const struct flowi *fl);
87679 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
87680-};
87681+} __do_const;
87682
87683 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
87684 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
87685@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
87686 int (*transport_finish)(struct sk_buff *skb,
87687 int async);
87688 void (*local_error)(struct sk_buff *skb, u32 mtu);
87689-};
87690+} __do_const;
87691
87692 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
87693 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
87694@@ -437,7 +436,7 @@ struct xfrm_mode {
87695 struct module *owner;
87696 unsigned int encap;
87697 int flags;
87698-};
87699+} __do_const;
87700
87701 /* Flags for xfrm_mode. */
87702 enum {
87703@@ -534,7 +533,7 @@ struct xfrm_policy {
87704 struct timer_list timer;
87705
87706 struct flow_cache_object flo;
87707- atomic_t genid;
87708+ atomic_unchecked_t genid;
87709 u32 priority;
87710 u32 index;
87711 struct xfrm_mark mark;
87712@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
87713 }
87714
87715 void xfrm_garbage_collect(struct net *net);
87716+void xfrm_garbage_collect_deferred(struct net *net);
87717
87718 #else
87719
87720@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
87721 static inline void xfrm_garbage_collect(struct net *net)
87722 {
87723 }
87724+static inline void xfrm_garbage_collect_deferred(struct net *net)
87725+{
87726+}
87727 #endif
87728
87729 static __inline__
87730diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
87731index 1017e0b..227aa4d 100644
87732--- a/include/rdma/iw_cm.h
87733+++ b/include/rdma/iw_cm.h
87734@@ -122,7 +122,7 @@ struct iw_cm_verbs {
87735 int backlog);
87736
87737 int (*destroy_listen)(struct iw_cm_id *cm_id);
87738-};
87739+} __no_const;
87740
87741 /**
87742 * iw_create_cm_id - Create an IW CM identifier.
87743diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
87744index 52beadf..598734c 100644
87745--- a/include/scsi/libfc.h
87746+++ b/include/scsi/libfc.h
87747@@ -771,6 +771,7 @@ struct libfc_function_template {
87748 */
87749 void (*disc_stop_final) (struct fc_lport *);
87750 };
87751+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
87752
87753 /**
87754 * struct fc_disc - Discovery context
87755@@ -875,7 +876,7 @@ struct fc_lport {
87756 struct fc_vport *vport;
87757
87758 /* Operational Information */
87759- struct libfc_function_template tt;
87760+ libfc_function_template_no_const tt;
87761 u8 link_up;
87762 u8 qfull;
87763 enum fc_lport_state state;
87764diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87765index 1a0d184..4fb841f 100644
87766--- a/include/scsi/scsi_device.h
87767+++ b/include/scsi/scsi_device.h
87768@@ -185,9 +185,9 @@ struct scsi_device {
87769 unsigned int max_device_blocked; /* what device_blocked counts down from */
87770 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87771
87772- atomic_t iorequest_cnt;
87773- atomic_t iodone_cnt;
87774- atomic_t ioerr_cnt;
87775+ atomic_unchecked_t iorequest_cnt;
87776+ atomic_unchecked_t iodone_cnt;
87777+ atomic_unchecked_t ioerr_cnt;
87778
87779 struct device sdev_gendev,
87780 sdev_dev;
87781diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87782index 007a0bc..7188db8 100644
87783--- a/include/scsi/scsi_transport_fc.h
87784+++ b/include/scsi/scsi_transport_fc.h
87785@@ -756,7 +756,8 @@ struct fc_function_template {
87786 unsigned long show_host_system_hostname:1;
87787
87788 unsigned long disable_target_scan:1;
87789-};
87790+} __do_const;
87791+typedef struct fc_function_template __no_const fc_function_template_no_const;
87792
87793
87794 /**
87795diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87796index ae6c3b8..fd748ac 100644
87797--- a/include/sound/compress_driver.h
87798+++ b/include/sound/compress_driver.h
87799@@ -128,7 +128,7 @@ struct snd_compr_ops {
87800 struct snd_compr_caps *caps);
87801 int (*get_codec_caps) (struct snd_compr_stream *stream,
87802 struct snd_compr_codec_caps *codec);
87803-};
87804+} __no_const;
87805
87806 /**
87807 * struct snd_compr: Compressed device
87808diff --git a/include/sound/soc.h b/include/sound/soc.h
87809index c83a334..27c8038 100644
87810--- a/include/sound/soc.h
87811+++ b/include/sound/soc.h
87812@@ -817,7 +817,7 @@ struct snd_soc_codec_driver {
87813 /* probe ordering - for components with runtime dependencies */
87814 int probe_order;
87815 int remove_order;
87816-};
87817+} __do_const;
87818
87819 /* SoC platform interface */
87820 struct snd_soc_platform_driver {
87821@@ -861,7 +861,7 @@ struct snd_soc_platform_driver {
87822 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
87823 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
87824 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87825-};
87826+} __do_const;
87827
87828 struct snd_soc_dai_link_component {
87829 const char *name;
87830diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87831index 9ec9864..e2ee1ee 100644
87832--- a/include/target/target_core_base.h
87833+++ b/include/target/target_core_base.h
87834@@ -761,7 +761,7 @@ struct se_device {
87835 atomic_long_t write_bytes;
87836 /* Active commands on this virtual SE device */
87837 atomic_t simple_cmds;
87838- atomic_t dev_ordered_id;
87839+ atomic_unchecked_t dev_ordered_id;
87840 atomic_t dev_ordered_sync;
87841 atomic_t dev_qf_count;
87842 int export_count;
87843diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87844new file mode 100644
87845index 0000000..fb634b7
87846--- /dev/null
87847+++ b/include/trace/events/fs.h
87848@@ -0,0 +1,53 @@
87849+#undef TRACE_SYSTEM
87850+#define TRACE_SYSTEM fs
87851+
87852+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87853+#define _TRACE_FS_H
87854+
87855+#include <linux/fs.h>
87856+#include <linux/tracepoint.h>
87857+
87858+TRACE_EVENT(do_sys_open,
87859+
87860+ TP_PROTO(const char *filename, int flags, int mode),
87861+
87862+ TP_ARGS(filename, flags, mode),
87863+
87864+ TP_STRUCT__entry(
87865+ __string( filename, filename )
87866+ __field( int, flags )
87867+ __field( int, mode )
87868+ ),
87869+
87870+ TP_fast_assign(
87871+ __assign_str(filename, filename);
87872+ __entry->flags = flags;
87873+ __entry->mode = mode;
87874+ ),
87875+
87876+ TP_printk("\"%s\" %x %o",
87877+ __get_str(filename), __entry->flags, __entry->mode)
87878+);
87879+
87880+TRACE_EVENT(open_exec,
87881+
87882+ TP_PROTO(const char *filename),
87883+
87884+ TP_ARGS(filename),
87885+
87886+ TP_STRUCT__entry(
87887+ __string( filename, filename )
87888+ ),
87889+
87890+ TP_fast_assign(
87891+ __assign_str(filename, filename);
87892+ ),
87893+
87894+ TP_printk("\"%s\"",
87895+ __get_str(filename))
87896+);
87897+
87898+#endif /* _TRACE_FS_H */
87899+
87900+/* This part must be outside protection */
87901+#include <trace/define_trace.h>
87902diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87903index 3608beb..df39d8a 100644
87904--- a/include/trace/events/irq.h
87905+++ b/include/trace/events/irq.h
87906@@ -36,7 +36,7 @@ struct softirq_action;
87907 */
87908 TRACE_EVENT(irq_handler_entry,
87909
87910- TP_PROTO(int irq, struct irqaction *action),
87911+ TP_PROTO(int irq, const struct irqaction *action),
87912
87913 TP_ARGS(irq, action),
87914
87915@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87916 */
87917 TRACE_EVENT(irq_handler_exit,
87918
87919- TP_PROTO(int irq, struct irqaction *action, int ret),
87920+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87921
87922 TP_ARGS(irq, action, ret),
87923
87924diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87925index 7caf44c..23c6f27 100644
87926--- a/include/uapi/linux/a.out.h
87927+++ b/include/uapi/linux/a.out.h
87928@@ -39,6 +39,14 @@ enum machine_type {
87929 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87930 };
87931
87932+/* Constants for the N_FLAGS field */
87933+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87934+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87935+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87936+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87937+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87938+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87939+
87940 #if !defined (N_MAGIC)
87941 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87942 #endif
87943diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87944index 22b6ad3..aeba37e 100644
87945--- a/include/uapi/linux/bcache.h
87946+++ b/include/uapi/linux/bcache.h
87947@@ -5,6 +5,7 @@
87948 * Bcache on disk data structures
87949 */
87950
87951+#include <linux/compiler.h>
87952 #include <asm/types.h>
87953
87954 #define BITMASK(name, type, field, offset, size) \
87955@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87956 /* Btree keys - all units are in sectors */
87957
87958 struct bkey {
87959- __u64 high;
87960- __u64 low;
87961+ __u64 high __intentional_overflow(-1);
87962+ __u64 low __intentional_overflow(-1);
87963 __u64 ptr[];
87964 };
87965
87966diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87967index d876736..ccce5c0 100644
87968--- a/include/uapi/linux/byteorder/little_endian.h
87969+++ b/include/uapi/linux/byteorder/little_endian.h
87970@@ -42,51 +42,51 @@
87971
87972 static inline __le64 __cpu_to_le64p(const __u64 *p)
87973 {
87974- return (__force __le64)*p;
87975+ return (__force const __le64)*p;
87976 }
87977-static inline __u64 __le64_to_cpup(const __le64 *p)
87978+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87979 {
87980- return (__force __u64)*p;
87981+ return (__force const __u64)*p;
87982 }
87983 static inline __le32 __cpu_to_le32p(const __u32 *p)
87984 {
87985- return (__force __le32)*p;
87986+ return (__force const __le32)*p;
87987 }
87988 static inline __u32 __le32_to_cpup(const __le32 *p)
87989 {
87990- return (__force __u32)*p;
87991+ return (__force const __u32)*p;
87992 }
87993 static inline __le16 __cpu_to_le16p(const __u16 *p)
87994 {
87995- return (__force __le16)*p;
87996+ return (__force const __le16)*p;
87997 }
87998 static inline __u16 __le16_to_cpup(const __le16 *p)
87999 {
88000- return (__force __u16)*p;
88001+ return (__force const __u16)*p;
88002 }
88003 static inline __be64 __cpu_to_be64p(const __u64 *p)
88004 {
88005- return (__force __be64)__swab64p(p);
88006+ return (__force const __be64)__swab64p(p);
88007 }
88008 static inline __u64 __be64_to_cpup(const __be64 *p)
88009 {
88010- return __swab64p((__u64 *)p);
88011+ return __swab64p((const __u64 *)p);
88012 }
88013 static inline __be32 __cpu_to_be32p(const __u32 *p)
88014 {
88015- return (__force __be32)__swab32p(p);
88016+ return (__force const __be32)__swab32p(p);
88017 }
88018-static inline __u32 __be32_to_cpup(const __be32 *p)
88019+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
88020 {
88021- return __swab32p((__u32 *)p);
88022+ return __swab32p((const __u32 *)p);
88023 }
88024 static inline __be16 __cpu_to_be16p(const __u16 *p)
88025 {
88026- return (__force __be16)__swab16p(p);
88027+ return (__force const __be16)__swab16p(p);
88028 }
88029 static inline __u16 __be16_to_cpup(const __be16 *p)
88030 {
88031- return __swab16p((__u16 *)p);
88032+ return __swab16p((const __u16 *)p);
88033 }
88034 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
88035 #define __le64_to_cpus(x) do { (void)(x); } while (0)
88036diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
88037index ef6103b..d4e65dd 100644
88038--- a/include/uapi/linux/elf.h
88039+++ b/include/uapi/linux/elf.h
88040@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
88041 #define PT_GNU_EH_FRAME 0x6474e550
88042
88043 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
88044+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
88045+
88046+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
88047+
88048+/* Constants for the e_flags field */
88049+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
88050+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
88051+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
88052+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
88053+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
88054+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
88055
88056 /*
88057 * Extended Numbering
88058@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
88059 #define DT_DEBUG 21
88060 #define DT_TEXTREL 22
88061 #define DT_JMPREL 23
88062+#define DT_FLAGS 30
88063+ #define DF_TEXTREL 0x00000004
88064 #define DT_ENCODING 32
88065 #define OLD_DT_LOOS 0x60000000
88066 #define DT_LOOS 0x6000000d
88067@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
88068 #define PF_W 0x2
88069 #define PF_X 0x1
88070
88071+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
88072+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
88073+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
88074+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
88075+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
88076+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
88077+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
88078+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
88079+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
88080+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
88081+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
88082+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
88083+
88084 typedef struct elf32_phdr{
88085 Elf32_Word p_type;
88086 Elf32_Off p_offset;
88087@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
88088 #define EI_OSABI 7
88089 #define EI_PAD 8
88090
88091+#define EI_PAX 14
88092+
88093 #define ELFMAG0 0x7f /* EI_MAG */
88094 #define ELFMAG1 'E'
88095 #define ELFMAG2 'L'
88096diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
88097index aa169c4..6a2771d 100644
88098--- a/include/uapi/linux/personality.h
88099+++ b/include/uapi/linux/personality.h
88100@@ -30,6 +30,7 @@ enum {
88101 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
88102 ADDR_NO_RANDOMIZE | \
88103 ADDR_COMPAT_LAYOUT | \
88104+ ADDR_LIMIT_3GB | \
88105 MMAP_PAGE_ZERO)
88106
88107 /*
88108diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
88109index 7530e74..e714828 100644
88110--- a/include/uapi/linux/screen_info.h
88111+++ b/include/uapi/linux/screen_info.h
88112@@ -43,7 +43,8 @@ struct screen_info {
88113 __u16 pages; /* 0x32 */
88114 __u16 vesa_attributes; /* 0x34 */
88115 __u32 capabilities; /* 0x36 */
88116- __u8 _reserved[6]; /* 0x3a */
88117+ __u16 vesapm_size; /* 0x3a */
88118+ __u8 _reserved[4]; /* 0x3c */
88119 } __attribute__((packed));
88120
88121 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
88122diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
88123index 0e011eb..82681b1 100644
88124--- a/include/uapi/linux/swab.h
88125+++ b/include/uapi/linux/swab.h
88126@@ -43,7 +43,7 @@
88127 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
88128 */
88129
88130-static inline __attribute_const__ __u16 __fswab16(__u16 val)
88131+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
88132 {
88133 #ifdef __HAVE_BUILTIN_BSWAP16__
88134 return __builtin_bswap16(val);
88135@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
88136 #endif
88137 }
88138
88139-static inline __attribute_const__ __u32 __fswab32(__u32 val)
88140+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
88141 {
88142 #ifdef __HAVE_BUILTIN_BSWAP32__
88143 return __builtin_bswap32(val);
88144@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
88145 #endif
88146 }
88147
88148-static inline __attribute_const__ __u64 __fswab64(__u64 val)
88149+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
88150 {
88151 #ifdef __HAVE_BUILTIN_BSWAP64__
88152 return __builtin_bswap64(val);
88153diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
88154index 43aaba1..1c30b48 100644
88155--- a/include/uapi/linux/sysctl.h
88156+++ b/include/uapi/linux/sysctl.h
88157@@ -155,8 +155,6 @@ enum
88158 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
88159 };
88160
88161-
88162-
88163 /* CTL_VM names: */
88164 enum
88165 {
88166diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
88167index 778a329..1416ffb 100644
88168--- a/include/uapi/linux/videodev2.h
88169+++ b/include/uapi/linux/videodev2.h
88170@@ -1285,7 +1285,7 @@ struct v4l2_ext_control {
88171 union {
88172 __s32 value;
88173 __s64 value64;
88174- char *string;
88175+ char __user *string;
88176 __u8 *p_u8;
88177 __u16 *p_u16;
88178 __u32 *p_u32;
88179diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
88180index 1590c49..5eab462 100644
88181--- a/include/uapi/linux/xattr.h
88182+++ b/include/uapi/linux/xattr.h
88183@@ -73,5 +73,9 @@
88184 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
88185 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
88186
88187+/* User namespace */
88188+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88189+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88190+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88191
88192 #endif /* _UAPI_LINUX_XATTR_H */
88193diff --git a/include/video/udlfb.h b/include/video/udlfb.h
88194index f9466fa..f4e2b81 100644
88195--- a/include/video/udlfb.h
88196+++ b/include/video/udlfb.h
88197@@ -53,10 +53,10 @@ struct dlfb_data {
88198 u32 pseudo_palette[256];
88199 int blank_mode; /*one of FB_BLANK_ */
88200 /* blit-only rendering path metrics, exposed through sysfs */
88201- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88202- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
88203- atomic_t bytes_sent; /* to usb, after compression including overhead */
88204- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
88205+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
88206+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
88207+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
88208+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
88209 };
88210
88211 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
88212diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
88213index 30f5362..8ed8ac9 100644
88214--- a/include/video/uvesafb.h
88215+++ b/include/video/uvesafb.h
88216@@ -122,6 +122,7 @@ struct uvesafb_par {
88217 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
88218 u8 pmi_setpal; /* PMI for palette changes */
88219 u16 *pmi_base; /* protected mode interface location */
88220+ u8 *pmi_code; /* protected mode code location */
88221 void *pmi_start;
88222 void *pmi_pal;
88223 u8 *vbe_state_orig; /*
88224diff --git a/init/Kconfig b/init/Kconfig
88225index 80a6907..baf7d53 100644
88226--- a/init/Kconfig
88227+++ b/init/Kconfig
88228@@ -1150,6 +1150,7 @@ endif # CGROUPS
88229
88230 config CHECKPOINT_RESTORE
88231 bool "Checkpoint/restore support" if EXPERT
88232+ depends on !GRKERNSEC
88233 default n
88234 help
88235 Enables additional kernel features in a sake of checkpoint/restore.
88236@@ -1635,7 +1636,7 @@ config SLUB_DEBUG
88237
88238 config COMPAT_BRK
88239 bool "Disable heap randomization"
88240- default y
88241+ default n
88242 help
88243 Randomizing heap placement makes heap exploits harder, but it
88244 also breaks ancient binaries (including anything libc5 based).
88245@@ -1923,7 +1924,7 @@ config INIT_ALL_POSSIBLE
88246 config STOP_MACHINE
88247 bool
88248 default y
88249- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
88250+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
88251 help
88252 Need stop_machine() primitive.
88253
88254diff --git a/init/Makefile b/init/Makefile
88255index 7bc47ee..6da2dc7 100644
88256--- a/init/Makefile
88257+++ b/init/Makefile
88258@@ -2,6 +2,9 @@
88259 # Makefile for the linux kernel.
88260 #
88261
88262+ccflags-y := $(GCC_PLUGINS_CFLAGS)
88263+asflags-y := $(GCC_PLUGINS_AFLAGS)
88264+
88265 obj-y := main.o version.o mounts.o
88266 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
88267 obj-y += noinitramfs.o
88268diff --git a/init/do_mounts.c b/init/do_mounts.c
88269index 82f2288..ea1430a 100644
88270--- a/init/do_mounts.c
88271+++ b/init/do_mounts.c
88272@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
88273 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
88274 {
88275 struct super_block *s;
88276- int err = sys_mount(name, "/root", fs, flags, data);
88277+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
88278 if (err)
88279 return err;
88280
88281- sys_chdir("/root");
88282+ sys_chdir((const char __force_user *)"/root");
88283 s = current->fs->pwd.dentry->d_sb;
88284 ROOT_DEV = s->s_dev;
88285 printk(KERN_INFO
88286@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
88287 va_start(args, fmt);
88288 vsprintf(buf, fmt, args);
88289 va_end(args);
88290- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
88291+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
88292 if (fd >= 0) {
88293 sys_ioctl(fd, FDEJECT, 0);
88294 sys_close(fd);
88295 }
88296 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
88297- fd = sys_open("/dev/console", O_RDWR, 0);
88298+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
88299 if (fd >= 0) {
88300 sys_ioctl(fd, TCGETS, (long)&termios);
88301 termios.c_lflag &= ~ICANON;
88302 sys_ioctl(fd, TCSETSF, (long)&termios);
88303- sys_read(fd, &c, 1);
88304+ sys_read(fd, (char __user *)&c, 1);
88305 termios.c_lflag |= ICANON;
88306 sys_ioctl(fd, TCSETSF, (long)&termios);
88307 sys_close(fd);
88308@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
88309 mount_root();
88310 out:
88311 devtmpfs_mount("dev");
88312- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88313- sys_chroot(".");
88314+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88315+ sys_chroot((const char __force_user *)".");
88316 }
88317
88318 static bool is_tmpfs;
88319diff --git a/init/do_mounts.h b/init/do_mounts.h
88320index f5b978a..69dbfe8 100644
88321--- a/init/do_mounts.h
88322+++ b/init/do_mounts.h
88323@@ -15,15 +15,15 @@ extern int root_mountflags;
88324
88325 static inline int create_dev(char *name, dev_t dev)
88326 {
88327- sys_unlink(name);
88328- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
88329+ sys_unlink((char __force_user *)name);
88330+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
88331 }
88332
88333 #if BITS_PER_LONG == 32
88334 static inline u32 bstat(char *name)
88335 {
88336 struct stat64 stat;
88337- if (sys_stat64(name, &stat) != 0)
88338+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
88339 return 0;
88340 if (!S_ISBLK(stat.st_mode))
88341 return 0;
88342@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
88343 static inline u32 bstat(char *name)
88344 {
88345 struct stat stat;
88346- if (sys_newstat(name, &stat) != 0)
88347+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
88348 return 0;
88349 if (!S_ISBLK(stat.st_mode))
88350 return 0;
88351diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
88352index 3e0878e..8a9d7a0 100644
88353--- a/init/do_mounts_initrd.c
88354+++ b/init/do_mounts_initrd.c
88355@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
88356 {
88357 sys_unshare(CLONE_FS | CLONE_FILES);
88358 /* stdin/stdout/stderr for /linuxrc */
88359- sys_open("/dev/console", O_RDWR, 0);
88360+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
88361 sys_dup(0);
88362 sys_dup(0);
88363 /* move initrd over / and chdir/chroot in initrd root */
88364- sys_chdir("/root");
88365- sys_mount(".", "/", NULL, MS_MOVE, NULL);
88366- sys_chroot(".");
88367+ sys_chdir((const char __force_user *)"/root");
88368+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
88369+ sys_chroot((const char __force_user *)".");
88370 sys_setsid();
88371 return 0;
88372 }
88373@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
88374 create_dev("/dev/root.old", Root_RAM0);
88375 /* mount initrd on rootfs' /root */
88376 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
88377- sys_mkdir("/old", 0700);
88378- sys_chdir("/old");
88379+ sys_mkdir((const char __force_user *)"/old", 0700);
88380+ sys_chdir((const char __force_user *)"/old");
88381
88382 /* try loading default modules from initrd */
88383 load_default_modules();
88384@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
88385 current->flags &= ~PF_FREEZER_SKIP;
88386
88387 /* move initrd to rootfs' /old */
88388- sys_mount("..", ".", NULL, MS_MOVE, NULL);
88389+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
88390 /* switch root and cwd back to / of rootfs */
88391- sys_chroot("..");
88392+ sys_chroot((const char __force_user *)"..");
88393
88394 if (new_decode_dev(real_root_dev) == Root_RAM0) {
88395- sys_chdir("/old");
88396+ sys_chdir((const char __force_user *)"/old");
88397 return;
88398 }
88399
88400- sys_chdir("/");
88401+ sys_chdir((const char __force_user *)"/");
88402 ROOT_DEV = new_decode_dev(real_root_dev);
88403 mount_root();
88404
88405 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
88406- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
88407+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
88408 if (!error)
88409 printk("okay\n");
88410 else {
88411- int fd = sys_open("/dev/root.old", O_RDWR, 0);
88412+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
88413 if (error == -ENOENT)
88414 printk("/initrd does not exist. Ignored.\n");
88415 else
88416 printk("failed\n");
88417 printk(KERN_NOTICE "Unmounting old root\n");
88418- sys_umount("/old", MNT_DETACH);
88419+ sys_umount((char __force_user *)"/old", MNT_DETACH);
88420 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
88421 if (fd < 0) {
88422 error = fd;
88423@@ -127,11 +127,11 @@ int __init initrd_load(void)
88424 * mounted in the normal path.
88425 */
88426 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
88427- sys_unlink("/initrd.image");
88428+ sys_unlink((const char __force_user *)"/initrd.image");
88429 handle_initrd();
88430 return 1;
88431 }
88432 }
88433- sys_unlink("/initrd.image");
88434+ sys_unlink((const char __force_user *)"/initrd.image");
88435 return 0;
88436 }
88437diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
88438index 8cb6db5..d729f50 100644
88439--- a/init/do_mounts_md.c
88440+++ b/init/do_mounts_md.c
88441@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
88442 partitioned ? "_d" : "", minor,
88443 md_setup_args[ent].device_names);
88444
88445- fd = sys_open(name, 0, 0);
88446+ fd = sys_open((char __force_user *)name, 0, 0);
88447 if (fd < 0) {
88448 printk(KERN_ERR "md: open failed - cannot start "
88449 "array %s\n", name);
88450@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
88451 * array without it
88452 */
88453 sys_close(fd);
88454- fd = sys_open(name, 0, 0);
88455+ fd = sys_open((char __force_user *)name, 0, 0);
88456 sys_ioctl(fd, BLKRRPART, 0);
88457 }
88458 sys_close(fd);
88459@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
88460
88461 wait_for_device_probe();
88462
88463- fd = sys_open("/dev/md0", 0, 0);
88464+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
88465 if (fd >= 0) {
88466 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
88467 sys_close(fd);
88468diff --git a/init/init_task.c b/init/init_task.c
88469index ba0a7f36..2bcf1d5 100644
88470--- a/init/init_task.c
88471+++ b/init/init_task.c
88472@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
88473 * Initial thread structure. Alignment of this is handled by a special
88474 * linker map entry.
88475 */
88476+#ifdef CONFIG_X86
88477+union thread_union init_thread_union __init_task_data;
88478+#else
88479 union thread_union init_thread_union __init_task_data =
88480 { INIT_THREAD_INFO(init_task) };
88481+#endif
88482diff --git a/init/initramfs.c b/init/initramfs.c
88483index bece48c..e911bd8 100644
88484--- a/init/initramfs.c
88485+++ b/init/initramfs.c
88486@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
88487
88488 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
88489 while (count) {
88490- ssize_t rv = sys_write(fd, p, count);
88491+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
88492
88493 if (rv < 0) {
88494 if (rv == -EINTR || rv == -EAGAIN)
88495@@ -107,7 +107,7 @@ static void __init free_hash(void)
88496 }
88497 }
88498
88499-static long __init do_utime(char *filename, time_t mtime)
88500+static long __init do_utime(char __force_user *filename, time_t mtime)
88501 {
88502 struct timespec t[2];
88503
88504@@ -142,7 +142,7 @@ static void __init dir_utime(void)
88505 struct dir_entry *de, *tmp;
88506 list_for_each_entry_safe(de, tmp, &dir_list, list) {
88507 list_del(&de->list);
88508- do_utime(de->name, de->mtime);
88509+ do_utime((char __force_user *)de->name, de->mtime);
88510 kfree(de->name);
88511 kfree(de);
88512 }
88513@@ -304,7 +304,7 @@ static int __init maybe_link(void)
88514 if (nlink >= 2) {
88515 char *old = find_link(major, minor, ino, mode, collected);
88516 if (old)
88517- return (sys_link(old, collected) < 0) ? -1 : 1;
88518+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
88519 }
88520 return 0;
88521 }
88522@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t mode)
88523 {
88524 struct stat st;
88525
88526- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
88527+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
88528 if (S_ISDIR(st.st_mode))
88529- sys_rmdir(path);
88530+ sys_rmdir((char __force_user *)path);
88531 else
88532- sys_unlink(path);
88533+ sys_unlink((char __force_user *)path);
88534 }
88535 }
88536
88537@@ -338,7 +338,7 @@ static int __init do_name(void)
88538 int openflags = O_WRONLY|O_CREAT;
88539 if (ml != 1)
88540 openflags |= O_TRUNC;
88541- wfd = sys_open(collected, openflags, mode);
88542+ wfd = sys_open((char __force_user *)collected, openflags, mode);
88543
88544 if (wfd >= 0) {
88545 sys_fchown(wfd, uid, gid);
88546@@ -350,17 +350,17 @@ static int __init do_name(void)
88547 }
88548 }
88549 } else if (S_ISDIR(mode)) {
88550- sys_mkdir(collected, mode);
88551- sys_chown(collected, uid, gid);
88552- sys_chmod(collected, mode);
88553+ sys_mkdir((char __force_user *)collected, mode);
88554+ sys_chown((char __force_user *)collected, uid, gid);
88555+ sys_chmod((char __force_user *)collected, mode);
88556 dir_add(collected, mtime);
88557 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
88558 S_ISFIFO(mode) || S_ISSOCK(mode)) {
88559 if (maybe_link() == 0) {
88560- sys_mknod(collected, mode, rdev);
88561- sys_chown(collected, uid, gid);
88562- sys_chmod(collected, mode);
88563- do_utime(collected, mtime);
88564+ sys_mknod((char __force_user *)collected, mode, rdev);
88565+ sys_chown((char __force_user *)collected, uid, gid);
88566+ sys_chmod((char __force_user *)collected, mode);
88567+ do_utime((char __force_user *)collected, mtime);
88568 }
88569 }
88570 return 0;
88571@@ -372,7 +372,7 @@ static int __init do_copy(void)
88572 if (xwrite(wfd, victim, body_len) != body_len)
88573 error("write error");
88574 sys_close(wfd);
88575- do_utime(vcollected, mtime);
88576+ do_utime((char __force_user *)vcollected, mtime);
88577 kfree(vcollected);
88578 eat(body_len);
88579 state = SkipIt;
88580@@ -390,9 +390,9 @@ static int __init do_symlink(void)
88581 {
88582 collected[N_ALIGN(name_len) + body_len] = '\0';
88583 clean_path(collected, 0);
88584- sys_symlink(collected + N_ALIGN(name_len), collected);
88585- sys_lchown(collected, uid, gid);
88586- do_utime(collected, mtime);
88587+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
88588+ sys_lchown((char __force_user *)collected, uid, gid);
88589+ do_utime((char __force_user *)collected, mtime);
88590 state = SkipIt;
88591 next_state = Reset;
88592 return 0;
88593diff --git a/init/main.c b/init/main.c
88594index bb1aed9..64f9745 100644
88595--- a/init/main.c
88596+++ b/init/main.c
88597@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
88598 static inline void mark_rodata_ro(void) { }
88599 #endif
88600
88601+extern void grsecurity_init(void);
88602+
88603 /*
88604 * Debug helper: via this flag we know that we are in 'early bootup code'
88605 * where only the boot processor is running with IRQ disabled. This means
88606@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
88607
88608 __setup("reset_devices", set_reset_devices);
88609
88610+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
88611+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
88612+static int __init setup_grsec_proc_gid(char *str)
88613+{
88614+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
88615+ return 1;
88616+}
88617+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
88618+#endif
88619+
88620+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
88621+unsigned long pax_user_shadow_base __read_only;
88622+EXPORT_SYMBOL(pax_user_shadow_base);
88623+extern char pax_enter_kernel_user[];
88624+extern char pax_exit_kernel_user[];
88625+#endif
88626+
88627+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
88628+static int __init setup_pax_nouderef(char *str)
88629+{
88630+#ifdef CONFIG_X86_32
88631+ unsigned int cpu;
88632+ struct desc_struct *gdt;
88633+
88634+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
88635+ gdt = get_cpu_gdt_table(cpu);
88636+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
88637+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
88638+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
88639+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
88640+ }
88641+ loadsegment(ds, __KERNEL_DS);
88642+ loadsegment(es, __KERNEL_DS);
88643+ loadsegment(ss, __KERNEL_DS);
88644+#else
88645+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
88646+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
88647+ clone_pgd_mask = ~(pgdval_t)0UL;
88648+ pax_user_shadow_base = 0UL;
88649+ setup_clear_cpu_cap(X86_FEATURE_PCID);
88650+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
88651+#endif
88652+
88653+ return 0;
88654+}
88655+early_param("pax_nouderef", setup_pax_nouderef);
88656+
88657+#ifdef CONFIG_X86_64
88658+static int __init setup_pax_weakuderef(char *str)
88659+{
88660+ if (clone_pgd_mask != ~(pgdval_t)0UL)
88661+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
88662+ return 1;
88663+}
88664+__setup("pax_weakuderef", setup_pax_weakuderef);
88665+#endif
88666+#endif
88667+
88668+#ifdef CONFIG_PAX_SOFTMODE
88669+int pax_softmode;
88670+
88671+static int __init setup_pax_softmode(char *str)
88672+{
88673+ get_option(&str, &pax_softmode);
88674+ return 1;
88675+}
88676+__setup("pax_softmode=", setup_pax_softmode);
88677+#endif
88678+
88679 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
88680 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
88681 static const char *panic_later, *panic_param;
88682@@ -728,7 +799,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
88683 struct blacklist_entry *entry;
88684 char *fn_name;
88685
88686- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
88687+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
88688 if (!fn_name)
88689 return false;
88690
88691@@ -780,7 +851,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
88692 {
88693 int count = preempt_count();
88694 int ret;
88695- char msgbuf[64];
88696+ const char *msg1 = "", *msg2 = "";
88697
88698 if (initcall_blacklisted(fn))
88699 return -EPERM;
88700@@ -790,18 +861,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
88701 else
88702 ret = fn();
88703
88704- msgbuf[0] = 0;
88705-
88706 if (preempt_count() != count) {
88707- sprintf(msgbuf, "preemption imbalance ");
88708+ msg1 = " preemption imbalance";
88709 preempt_count_set(count);
88710 }
88711 if (irqs_disabled()) {
88712- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
88713+ msg2 = " disabled interrupts";
88714 local_irq_enable();
88715 }
88716- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
88717+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
88718
88719+ add_latent_entropy();
88720 return ret;
88721 }
88722
88723@@ -908,8 +978,8 @@ static int run_init_process(const char *init_filename)
88724 {
88725 argv_init[0] = init_filename;
88726 return do_execve(getname_kernel(init_filename),
88727- (const char __user *const __user *)argv_init,
88728- (const char __user *const __user *)envp_init);
88729+ (const char __user *const __force_user *)argv_init,
88730+ (const char __user *const __force_user *)envp_init);
88731 }
88732
88733 static int try_to_run_init_process(const char *init_filename)
88734@@ -926,6 +996,10 @@ static int try_to_run_init_process(const char *init_filename)
88735 return ret;
88736 }
88737
88738+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88739+extern int gr_init_ran;
88740+#endif
88741+
88742 static noinline void __init kernel_init_freeable(void);
88743
88744 static int __ref kernel_init(void *unused)
88745@@ -950,6 +1024,11 @@ static int __ref kernel_init(void *unused)
88746 ramdisk_execute_command, ret);
88747 }
88748
88749+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88750+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
88751+ gr_init_ran = 1;
88752+#endif
88753+
88754 /*
88755 * We try each of these until one succeeds.
88756 *
88757@@ -1005,7 +1084,7 @@ static noinline void __init kernel_init_freeable(void)
88758 do_basic_setup();
88759
88760 /* Open the /dev/console on the rootfs, this should never fail */
88761- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88762+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88763 pr_err("Warning: unable to open an initial console.\n");
88764
88765 (void) sys_dup(0);
88766@@ -1018,11 +1097,13 @@ static noinline void __init kernel_init_freeable(void)
88767 if (!ramdisk_execute_command)
88768 ramdisk_execute_command = "/init";
88769
88770- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88771+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88772 ramdisk_execute_command = NULL;
88773 prepare_namespace();
88774 }
88775
88776+ grsecurity_init();
88777+
88778 /*
88779 * Ok, we have completed the initial bootup, and
88780 * we're essentially up and running. Get rid of the
88781diff --git a/ipc/compat.c b/ipc/compat.c
88782index b5ef4f7..ff31d87 100644
88783--- a/ipc/compat.c
88784+++ b/ipc/compat.c
88785@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88786 COMPAT_SHMLBA);
88787 if (err < 0)
88788 return err;
88789- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88790+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88791 }
88792 case SHMDT:
88793 return sys_shmdt(compat_ptr(ptr));
88794diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88795index c3f0326..d4e0579 100644
88796--- a/ipc/ipc_sysctl.c
88797+++ b/ipc/ipc_sysctl.c
88798@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88799 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88800 void __user *buffer, size_t *lenp, loff_t *ppos)
88801 {
88802- struct ctl_table ipc_table;
88803+ ctl_table_no_const ipc_table;
88804
88805 memcpy(&ipc_table, table, sizeof(ipc_table));
88806 ipc_table.data = get_ipc(table);
88807@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88808 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88809 void __user *buffer, size_t *lenp, loff_t *ppos)
88810 {
88811- struct ctl_table ipc_table;
88812+ ctl_table_no_const ipc_table;
88813
88814 memcpy(&ipc_table, table, sizeof(ipc_table));
88815 ipc_table.data = get_ipc(table);
88816@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88817 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88818 void __user *buffer, size_t *lenp, loff_t *ppos)
88819 {
88820- struct ctl_table ipc_table;
88821+ ctl_table_no_const ipc_table;
88822 size_t lenp_bef = *lenp;
88823 int rc;
88824
88825@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88826 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88827 void __user *buffer, size_t *lenp, loff_t *ppos)
88828 {
88829- struct ctl_table ipc_table;
88830+ ctl_table_no_const ipc_table;
88831 memcpy(&ipc_table, table, sizeof(ipc_table));
88832 ipc_table.data = get_ipc(table);
88833
88834@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
88835 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
88836 void __user *buffer, size_t *lenp, loff_t *ppos)
88837 {
88838- struct ctl_table ipc_table;
88839+ ctl_table_no_const ipc_table;
88840 size_t lenp_bef = *lenp;
88841 int oldval;
88842 int rc;
88843diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88844index 68d4e95..1477ded 100644
88845--- a/ipc/mq_sysctl.c
88846+++ b/ipc/mq_sysctl.c
88847@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88848 static int proc_mq_dointvec(struct ctl_table *table, int write,
88849 void __user *buffer, size_t *lenp, loff_t *ppos)
88850 {
88851- struct ctl_table mq_table;
88852+ ctl_table_no_const mq_table;
88853 memcpy(&mq_table, table, sizeof(mq_table));
88854 mq_table.data = get_mq(table);
88855
88856@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88857 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88858 void __user *buffer, size_t *lenp, loff_t *ppos)
88859 {
88860- struct ctl_table mq_table;
88861+ ctl_table_no_const mq_table;
88862 memcpy(&mq_table, table, sizeof(mq_table));
88863 mq_table.data = get_mq(table);
88864
88865diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88866index 4fcf39a..d3cc2ec 100644
88867--- a/ipc/mqueue.c
88868+++ b/ipc/mqueue.c
88869@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88870 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88871 info->attr.mq_msgsize);
88872
88873+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88874 spin_lock(&mq_lock);
88875 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88876 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88877diff --git a/ipc/shm.c b/ipc/shm.c
88878index 7fc9f9f..95e201f 100644
88879--- a/ipc/shm.c
88880+++ b/ipc/shm.c
88881@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88882 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88883 #endif
88884
88885+#ifdef CONFIG_GRKERNSEC
88886+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88887+ const u64 shm_createtime, const kuid_t cuid,
88888+ const int shmid);
88889+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88890+ const u64 shm_createtime);
88891+#endif
88892+
88893 void shm_init_ns(struct ipc_namespace *ns)
88894 {
88895 ns->shm_ctlmax = SHMMAX;
88896@@ -559,6 +567,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88897 shp->shm_lprid = 0;
88898 shp->shm_atim = shp->shm_dtim = 0;
88899 shp->shm_ctim = get_seconds();
88900+#ifdef CONFIG_GRKERNSEC
88901+ shp->shm_createtime = ktime_get_ns();
88902+#endif
88903 shp->shm_segsz = size;
88904 shp->shm_nattch = 0;
88905 shp->shm_file = file;
88906@@ -1095,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88907 f_mode = FMODE_READ | FMODE_WRITE;
88908 }
88909 if (shmflg & SHM_EXEC) {
88910+
88911+#ifdef CONFIG_PAX_MPROTECT
88912+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88913+ goto out;
88914+#endif
88915+
88916 prot |= PROT_EXEC;
88917 acc_mode |= S_IXUGO;
88918 }
88919@@ -1119,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88920 if (err)
88921 goto out_unlock;
88922
88923+#ifdef CONFIG_GRKERNSEC
88924+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88925+ shp->shm_perm.cuid, shmid) ||
88926+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88927+ err = -EACCES;
88928+ goto out_unlock;
88929+ }
88930+#endif
88931+
88932 ipc_lock_object(&shp->shm_perm);
88933
88934 /* check if shm_destroy() is tearing down shp */
88935@@ -1131,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88936 path = shp->shm_file->f_path;
88937 path_get(&path);
88938 shp->shm_nattch++;
88939+#ifdef CONFIG_GRKERNSEC
88940+ shp->shm_lapid = current->pid;
88941+#endif
88942 size = i_size_read(path.dentry->d_inode);
88943 ipc_unlock_object(&shp->shm_perm);
88944 rcu_read_unlock();
88945diff --git a/ipc/util.c b/ipc/util.c
88946index 27d74e6..8be0be2 100644
88947--- a/ipc/util.c
88948+++ b/ipc/util.c
88949@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88950 int (*show)(struct seq_file *, void *);
88951 };
88952
88953+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88954+
88955 static void ipc_memory_notifier(struct work_struct *work)
88956 {
88957 ipcns_notify(IPCNS_MEMCHANGED);
88958@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88959 granted_mode >>= 6;
88960 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88961 granted_mode >>= 3;
88962+
88963+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88964+ return -1;
88965+
88966 /* is there some bit set in requested_mode but not in granted_mode? */
88967 if ((requested_mode & ~granted_mode & 0007) &&
88968 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88969diff --git a/kernel/audit.c b/kernel/audit.c
88970index ba2ff5a..c6c0deb 100644
88971--- a/kernel/audit.c
88972+++ b/kernel/audit.c
88973@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88974 3) suppressed due to audit_rate_limit
88975 4) suppressed due to audit_backlog_limit
88976 */
88977-static atomic_t audit_lost = ATOMIC_INIT(0);
88978+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88979
88980 /* The netlink socket. */
88981 static struct sock *audit_sock;
88982@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88983 unsigned long now;
88984 int print;
88985
88986- atomic_inc(&audit_lost);
88987+ atomic_inc_unchecked(&audit_lost);
88988
88989 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88990
88991@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88992 if (print) {
88993 if (printk_ratelimit())
88994 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88995- atomic_read(&audit_lost),
88996+ atomic_read_unchecked(&audit_lost),
88997 audit_rate_limit,
88998 audit_backlog_limit);
88999 audit_panic(message);
89000@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89001 s.pid = audit_pid;
89002 s.rate_limit = audit_rate_limit;
89003 s.backlog_limit = audit_backlog_limit;
89004- s.lost = atomic_read(&audit_lost);
89005+ s.lost = atomic_read_unchecked(&audit_lost);
89006 s.backlog = skb_queue_len(&audit_skb_queue);
89007 s.version = AUDIT_VERSION_LATEST;
89008 s.backlog_wait_time = audit_backlog_wait_time;
89009diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89010index 21eae3c..66db239 100644
89011--- a/kernel/auditsc.c
89012+++ b/kernel/auditsc.c
89013@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89014 }
89015
89016 /* global counter which is incremented every time something logs in */
89017-static atomic_t session_id = ATOMIC_INIT(0);
89018+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89019
89020 static int audit_set_loginuid_perm(kuid_t loginuid)
89021 {
89022@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
89023
89024 /* are we setting or clearing? */
89025 if (uid_valid(loginuid))
89026- sessionid = (unsigned int)atomic_inc_return(&session_id);
89027+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
89028
89029 task->sessionid = sessionid;
89030 task->loginuid = loginuid;
89031diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
89032index 7f0dbcb..b54bb2c 100644
89033--- a/kernel/bpf/core.c
89034+++ b/kernel/bpf/core.c
89035@@ -22,6 +22,7 @@
89036 */
89037 #include <linux/filter.h>
89038 #include <linux/skbuff.h>
89039+#include <linux/vmalloc.h>
89040 #include <asm/unaligned.h>
89041
89042 /* Registers */
89043@@ -63,6 +64,67 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
89044 return NULL;
89045 }
89046
89047+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
89048+{
89049+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
89050+ gfp_extra_flags;
89051+ struct bpf_work_struct *ws;
89052+ struct bpf_prog *fp;
89053+
89054+ size = round_up(size, PAGE_SIZE);
89055+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
89056+ if (fp == NULL)
89057+ return NULL;
89058+
89059+ ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
89060+ if (ws == NULL) {
89061+ vfree(fp);
89062+ return NULL;
89063+ }
89064+
89065+ fp->pages = size / PAGE_SIZE;
89066+ fp->work = ws;
89067+
89068+ return fp;
89069+}
89070+EXPORT_SYMBOL_GPL(bpf_prog_alloc);
89071+
89072+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
89073+ gfp_t gfp_extra_flags)
89074+{
89075+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
89076+ gfp_extra_flags;
89077+ struct bpf_prog *fp;
89078+
89079+ BUG_ON(fp_old == NULL);
89080+
89081+ size = round_up(size, PAGE_SIZE);
89082+ if (size <= fp_old->pages * PAGE_SIZE)
89083+ return fp_old;
89084+
89085+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
89086+ if (fp != NULL) {
89087+ memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
89088+ fp->pages = size / PAGE_SIZE;
89089+
89090+ /* We keep fp->work from fp_old around in the new
89091+ * reallocated structure.
89092+ */
89093+ fp_old->work = NULL;
89094+ __bpf_prog_free(fp_old);
89095+ }
89096+
89097+ return fp;
89098+}
89099+EXPORT_SYMBOL_GPL(bpf_prog_realloc);
89100+
89101+void __bpf_prog_free(struct bpf_prog *fp)
89102+{
89103+ kfree(fp->work);
89104+ vfree(fp);
89105+}
89106+EXPORT_SYMBOL_GPL(__bpf_prog_free);
89107+
89108 /* Base function for offset calculation. Needs to go into .text section,
89109 * therefore keeping it non-static as well; will also be used by JITs
89110 * anyway later on, so do not let the compiler omit it.
89111@@ -523,12 +585,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
89112
89113 /* Probe if internal BPF can be JITed */
89114 bpf_int_jit_compile(fp);
89115+ /* Lock whole bpf_prog as read-only */
89116+ bpf_prog_lock_ro(fp);
89117 }
89118 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
89119
89120-/* free internal BPF program */
89121+static void bpf_prog_free_deferred(struct work_struct *work)
89122+{
89123+ struct bpf_work_struct *ws;
89124+
89125+ ws = container_of(work, struct bpf_work_struct, work);
89126+ bpf_jit_free(ws->prog);
89127+}
89128+
89129+/* Free internal BPF program */
89130 void bpf_prog_free(struct bpf_prog *fp)
89131 {
89132- bpf_jit_free(fp);
89133+ struct bpf_work_struct *ws = fp->work;
89134+
89135+ INIT_WORK(&ws->work, bpf_prog_free_deferred);
89136+ ws->prog = fp;
89137+ schedule_work(&ws->work);
89138 }
89139 EXPORT_SYMBOL_GPL(bpf_prog_free);
89140diff --git a/kernel/capability.c b/kernel/capability.c
89141index 989f5bf..d317ca0 100644
89142--- a/kernel/capability.c
89143+++ b/kernel/capability.c
89144@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
89145 * before modification is attempted and the application
89146 * fails.
89147 */
89148+ if (tocopy > ARRAY_SIZE(kdata))
89149+ return -EFAULT;
89150+
89151 if (copy_to_user(dataptr, kdata, tocopy
89152 * sizeof(struct __user_cap_data_struct))) {
89153 return -EFAULT;
89154@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
89155 int ret;
89156
89157 rcu_read_lock();
89158- ret = security_capable(__task_cred(t), ns, cap);
89159+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
89160+ gr_task_is_capable(t, __task_cred(t), cap);
89161 rcu_read_unlock();
89162
89163- return (ret == 0);
89164+ return ret;
89165 }
89166
89167 /**
89168@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
89169 int ret;
89170
89171 rcu_read_lock();
89172- ret = security_capable_noaudit(__task_cred(t), ns, cap);
89173+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
89174 rcu_read_unlock();
89175
89176- return (ret == 0);
89177+ return ret;
89178 }
89179
89180 /**
89181@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
89182 BUG();
89183 }
89184
89185- if (security_capable(current_cred(), ns, cap) == 0) {
89186+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
89187 current->flags |= PF_SUPERPRIV;
89188 return true;
89189 }
89190@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
89191 }
89192 EXPORT_SYMBOL(ns_capable);
89193
89194+bool ns_capable_nolog(struct user_namespace *ns, int cap)
89195+{
89196+ if (unlikely(!cap_valid(cap))) {
89197+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
89198+ BUG();
89199+ }
89200+
89201+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
89202+ current->flags |= PF_SUPERPRIV;
89203+ return true;
89204+ }
89205+ return false;
89206+}
89207+EXPORT_SYMBOL(ns_capable_nolog);
89208+
89209 /**
89210 * file_ns_capable - Determine if the file's opener had a capability in effect
89211 * @file: The file we want to check
89212@@ -427,6 +446,12 @@ bool capable(int cap)
89213 }
89214 EXPORT_SYMBOL(capable);
89215
89216+bool capable_nolog(int cap)
89217+{
89218+ return ns_capable_nolog(&init_user_ns, cap);
89219+}
89220+EXPORT_SYMBOL(capable_nolog);
89221+
89222 /**
89223 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
89224 * @inode: The inode in question
89225@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
89226 kgid_has_mapping(ns, inode->i_gid);
89227 }
89228 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
89229+
89230+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
89231+{
89232+ struct user_namespace *ns = current_user_ns();
89233+
89234+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
89235+ kgid_has_mapping(ns, inode->i_gid);
89236+}
89237+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
89238diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89239index 3a73f99..4f29fea 100644
89240--- a/kernel/cgroup.c
89241+++ b/kernel/cgroup.c
89242@@ -5341,6 +5341,14 @@ static void cgroup_release_agent(struct work_struct *work)
89243 release_list);
89244 list_del_init(&cgrp->release_list);
89245 raw_spin_unlock(&release_list_lock);
89246+
89247+ /*
89248+ * don't bother calling call_usermodehelper if we haven't
89249+ * configured a binary to execute
89250+ */
89251+ if (cgrp->root->release_agent_path[0] == '\0')
89252+ goto continue_free;
89253+
89254 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
89255 if (!pathbuf)
89256 goto continue_free;
89257@@ -5539,7 +5547,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
89258 struct task_struct *task;
89259 int count = 0;
89260
89261- seq_printf(seq, "css_set %p\n", cset);
89262+ seq_printf(seq, "css_set %pK\n", cset);
89263
89264 list_for_each_entry(task, &cset->tasks, cg_list) {
89265 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
89266diff --git a/kernel/compat.c b/kernel/compat.c
89267index ebb3c36..1df606e 100644
89268--- a/kernel/compat.c
89269+++ b/kernel/compat.c
89270@@ -13,6 +13,7 @@
89271
89272 #include <linux/linkage.h>
89273 #include <linux/compat.h>
89274+#include <linux/module.h>
89275 #include <linux/errno.h>
89276 #include <linux/time.h>
89277 #include <linux/signal.h>
89278@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
89279 mm_segment_t oldfs;
89280 long ret;
89281
89282- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
89283+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
89284 oldfs = get_fs();
89285 set_fs(KERNEL_DS);
89286 ret = hrtimer_nanosleep_restart(restart);
89287@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
89288 oldfs = get_fs();
89289 set_fs(KERNEL_DS);
89290 ret = hrtimer_nanosleep(&tu,
89291- rmtp ? (struct timespec __user *)&rmt : NULL,
89292+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
89293 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
89294 set_fs(oldfs);
89295
89296@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
89297 mm_segment_t old_fs = get_fs();
89298
89299 set_fs(KERNEL_DS);
89300- ret = sys_sigpending((old_sigset_t __user *) &s);
89301+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
89302 set_fs(old_fs);
89303 if (ret == 0)
89304 ret = put_user(s, set);
89305@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
89306 mm_segment_t old_fs = get_fs();
89307
89308 set_fs(KERNEL_DS);
89309- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
89310+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
89311 set_fs(old_fs);
89312
89313 if (!ret) {
89314@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
89315 set_fs (KERNEL_DS);
89316 ret = sys_wait4(pid,
89317 (stat_addr ?
89318- (unsigned int __user *) &status : NULL),
89319- options, (struct rusage __user *) &r);
89320+ (unsigned int __force_user *) &status : NULL),
89321+ options, (struct rusage __force_user *) &r);
89322 set_fs (old_fs);
89323
89324 if (ret > 0) {
89325@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
89326 memset(&info, 0, sizeof(info));
89327
89328 set_fs(KERNEL_DS);
89329- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
89330- uru ? (struct rusage __user *)&ru : NULL);
89331+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
89332+ uru ? (struct rusage __force_user *)&ru : NULL);
89333 set_fs(old_fs);
89334
89335 if ((ret < 0) || (info.si_signo == 0))
89336@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
89337 oldfs = get_fs();
89338 set_fs(KERNEL_DS);
89339 err = sys_timer_settime(timer_id, flags,
89340- (struct itimerspec __user *) &newts,
89341- (struct itimerspec __user *) &oldts);
89342+ (struct itimerspec __force_user *) &newts,
89343+ (struct itimerspec __force_user *) &oldts);
89344 set_fs(oldfs);
89345 if (!err && old && put_compat_itimerspec(old, &oldts))
89346 return -EFAULT;
89347@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
89348 oldfs = get_fs();
89349 set_fs(KERNEL_DS);
89350 err = sys_timer_gettime(timer_id,
89351- (struct itimerspec __user *) &ts);
89352+ (struct itimerspec __force_user *) &ts);
89353 set_fs(oldfs);
89354 if (!err && put_compat_itimerspec(setting, &ts))
89355 return -EFAULT;
89356@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
89357 oldfs = get_fs();
89358 set_fs(KERNEL_DS);
89359 err = sys_clock_settime(which_clock,
89360- (struct timespec __user *) &ts);
89361+ (struct timespec __force_user *) &ts);
89362 set_fs(oldfs);
89363 return err;
89364 }
89365@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
89366 oldfs = get_fs();
89367 set_fs(KERNEL_DS);
89368 err = sys_clock_gettime(which_clock,
89369- (struct timespec __user *) &ts);
89370+ (struct timespec __force_user *) &ts);
89371 set_fs(oldfs);
89372 if (!err && compat_put_timespec(&ts, tp))
89373 return -EFAULT;
89374@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
89375
89376 oldfs = get_fs();
89377 set_fs(KERNEL_DS);
89378- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
89379+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
89380 set_fs(oldfs);
89381
89382 err = compat_put_timex(utp, &txc);
89383@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
89384 oldfs = get_fs();
89385 set_fs(KERNEL_DS);
89386 err = sys_clock_getres(which_clock,
89387- (struct timespec __user *) &ts);
89388+ (struct timespec __force_user *) &ts);
89389 set_fs(oldfs);
89390 if (!err && tp && compat_put_timespec(&ts, tp))
89391 return -EFAULT;
89392@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
89393 struct timespec tu;
89394 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
89395
89396- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
89397+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
89398 oldfs = get_fs();
89399 set_fs(KERNEL_DS);
89400 err = clock_nanosleep_restart(restart);
89401@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
89402 oldfs = get_fs();
89403 set_fs(KERNEL_DS);
89404 err = sys_clock_nanosleep(which_clock, flags,
89405- (struct timespec __user *) &in,
89406- (struct timespec __user *) &out);
89407+ (struct timespec __force_user *) &in,
89408+ (struct timespec __force_user *) &out);
89409 set_fs(oldfs);
89410
89411 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
89412@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
89413 mm_segment_t old_fs = get_fs();
89414
89415 set_fs(KERNEL_DS);
89416- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
89417+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
89418 set_fs(old_fs);
89419 if (compat_put_timespec(&t, interval))
89420 return -EFAULT;
89421diff --git a/kernel/configs.c b/kernel/configs.c
89422index c18b1f1..b9a0132 100644
89423--- a/kernel/configs.c
89424+++ b/kernel/configs.c
89425@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
89426 struct proc_dir_entry *entry;
89427
89428 /* create the current config file */
89429+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
89430+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
89431+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
89432+ &ikconfig_file_ops);
89433+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
89434+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
89435+ &ikconfig_file_ops);
89436+#endif
89437+#else
89438 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
89439 &ikconfig_file_ops);
89440+#endif
89441+
89442 if (!entry)
89443 return -ENOMEM;
89444
89445diff --git a/kernel/cred.c b/kernel/cred.c
89446index e0573a4..26c0fd3 100644
89447--- a/kernel/cred.c
89448+++ b/kernel/cred.c
89449@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
89450 validate_creds(cred);
89451 alter_cred_subscribers(cred, -1);
89452 put_cred(cred);
89453+
89454+#ifdef CONFIG_GRKERNSEC_SETXID
89455+ cred = (struct cred *) tsk->delayed_cred;
89456+ if (cred != NULL) {
89457+ tsk->delayed_cred = NULL;
89458+ validate_creds(cred);
89459+ alter_cred_subscribers(cred, -1);
89460+ put_cred(cred);
89461+ }
89462+#endif
89463 }
89464
89465 /**
89466@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
89467 * Always returns 0 thus allowing this function to be tail-called at the end
89468 * of, say, sys_setgid().
89469 */
89470-int commit_creds(struct cred *new)
89471+static int __commit_creds(struct cred *new)
89472 {
89473 struct task_struct *task = current;
89474 const struct cred *old = task->real_cred;
89475@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
89476
89477 get_cred(new); /* we will require a ref for the subj creds too */
89478
89479+ gr_set_role_label(task, new->uid, new->gid);
89480+
89481 /* dumpability changes */
89482 if (!uid_eq(old->euid, new->euid) ||
89483 !gid_eq(old->egid, new->egid) ||
89484@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
89485 put_cred(old);
89486 return 0;
89487 }
89488+#ifdef CONFIG_GRKERNSEC_SETXID
89489+extern int set_user(struct cred *new);
89490+
89491+void gr_delayed_cred_worker(void)
89492+{
89493+ const struct cred *new = current->delayed_cred;
89494+ struct cred *ncred;
89495+
89496+ current->delayed_cred = NULL;
89497+
89498+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
89499+ // from doing get_cred on it when queueing this
89500+ put_cred(new);
89501+ return;
89502+ } else if (new == NULL)
89503+ return;
89504+
89505+ ncred = prepare_creds();
89506+ if (!ncred)
89507+ goto die;
89508+ // uids
89509+ ncred->uid = new->uid;
89510+ ncred->euid = new->euid;
89511+ ncred->suid = new->suid;
89512+ ncred->fsuid = new->fsuid;
89513+ // gids
89514+ ncred->gid = new->gid;
89515+ ncred->egid = new->egid;
89516+ ncred->sgid = new->sgid;
89517+ ncred->fsgid = new->fsgid;
89518+ // groups
89519+ set_groups(ncred, new->group_info);
89520+ // caps
89521+ ncred->securebits = new->securebits;
89522+ ncred->cap_inheritable = new->cap_inheritable;
89523+ ncred->cap_permitted = new->cap_permitted;
89524+ ncred->cap_effective = new->cap_effective;
89525+ ncred->cap_bset = new->cap_bset;
89526+
89527+ if (set_user(ncred)) {
89528+ abort_creds(ncred);
89529+ goto die;
89530+ }
89531+
89532+ // from doing get_cred on it when queueing this
89533+ put_cred(new);
89534+
89535+ __commit_creds(ncred);
89536+ return;
89537+die:
89538+ // from doing get_cred on it when queueing this
89539+ put_cred(new);
89540+ do_group_exit(SIGKILL);
89541+}
89542+#endif
89543+
89544+int commit_creds(struct cred *new)
89545+{
89546+#ifdef CONFIG_GRKERNSEC_SETXID
89547+ int ret;
89548+ int schedule_it = 0;
89549+ struct task_struct *t;
89550+ unsigned oldsecurebits = current_cred()->securebits;
89551+
89552+ /* we won't get called with tasklist_lock held for writing
89553+ and interrupts disabled as the cred struct in that case is
89554+ init_cred
89555+ */
89556+ if (grsec_enable_setxid && !current_is_single_threaded() &&
89557+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
89558+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
89559+ schedule_it = 1;
89560+ }
89561+ ret = __commit_creds(new);
89562+ if (schedule_it) {
89563+ rcu_read_lock();
89564+ read_lock(&tasklist_lock);
89565+ for (t = next_thread(current); t != current;
89566+ t = next_thread(t)) {
89567+ /* we'll check if the thread has uid 0 in
89568+ * the delayed worker routine
89569+ */
89570+ if (task_securebits(t) == oldsecurebits &&
89571+ t->delayed_cred == NULL) {
89572+ t->delayed_cred = get_cred(new);
89573+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
89574+ set_tsk_need_resched(t);
89575+ }
89576+ }
89577+ read_unlock(&tasklist_lock);
89578+ rcu_read_unlock();
89579+ }
89580+
89581+ return ret;
89582+#else
89583+ return __commit_creds(new);
89584+#endif
89585+}
89586+
89587 EXPORT_SYMBOL(commit_creds);
89588
89589 /**
89590diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
89591index 1adf62b..7736e06 100644
89592--- a/kernel/debug/debug_core.c
89593+++ b/kernel/debug/debug_core.c
89594@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
89595 */
89596 static atomic_t masters_in_kgdb;
89597 static atomic_t slaves_in_kgdb;
89598-static atomic_t kgdb_break_tasklet_var;
89599+static atomic_unchecked_t kgdb_break_tasklet_var;
89600 atomic_t kgdb_setting_breakpoint;
89601
89602 struct task_struct *kgdb_usethread;
89603@@ -134,7 +134,7 @@ int kgdb_single_step;
89604 static pid_t kgdb_sstep_pid;
89605
89606 /* to keep track of the CPU which is doing the single stepping*/
89607-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89608+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
89609
89610 /*
89611 * If you are debugging a problem where roundup (the collection of
89612@@ -549,7 +549,7 @@ return_normal:
89613 * kernel will only try for the value of sstep_tries before
89614 * giving up and continuing on.
89615 */
89616- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
89617+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
89618 (kgdb_info[cpu].task &&
89619 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
89620 atomic_set(&kgdb_active, -1);
89621@@ -647,8 +647,8 @@ cpu_master_loop:
89622 }
89623
89624 kgdb_restore:
89625- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
89626- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
89627+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
89628+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
89629 if (kgdb_info[sstep_cpu].task)
89630 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
89631 else
89632@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
89633 static void kgdb_tasklet_bpt(unsigned long ing)
89634 {
89635 kgdb_breakpoint();
89636- atomic_set(&kgdb_break_tasklet_var, 0);
89637+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
89638 }
89639
89640 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
89641
89642 void kgdb_schedule_breakpoint(void)
89643 {
89644- if (atomic_read(&kgdb_break_tasklet_var) ||
89645+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
89646 atomic_read(&kgdb_active) != -1 ||
89647 atomic_read(&kgdb_setting_breakpoint))
89648 return;
89649- atomic_inc(&kgdb_break_tasklet_var);
89650+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
89651 tasklet_schedule(&kgdb_tasklet_breakpoint);
89652 }
89653 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
89654diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
89655index 379650b..30c5180 100644
89656--- a/kernel/debug/kdb/kdb_main.c
89657+++ b/kernel/debug/kdb/kdb_main.c
89658@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
89659 continue;
89660
89661 kdb_printf("%-20s%8u 0x%p ", mod->name,
89662- mod->core_size, (void *)mod);
89663+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
89664 #ifdef CONFIG_MODULE_UNLOAD
89665 kdb_printf("%4ld ", module_refcount(mod));
89666 #endif
89667@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
89668 kdb_printf(" (Loading)");
89669 else
89670 kdb_printf(" (Live)");
89671- kdb_printf(" 0x%p", mod->module_core);
89672+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
89673
89674 #ifdef CONFIG_MODULE_UNLOAD
89675 {
89676diff --git a/kernel/events/core.c b/kernel/events/core.c
89677index 963bf13..a78dd3e 100644
89678--- a/kernel/events/core.c
89679+++ b/kernel/events/core.c
89680@@ -161,8 +161,15 @@ static struct srcu_struct pmus_srcu;
89681 * 0 - disallow raw tracepoint access for unpriv
89682 * 1 - disallow cpu events for unpriv
89683 * 2 - disallow kernel profiling for unpriv
89684+ * 3 - disallow all unpriv perf event use
89685 */
89686-int sysctl_perf_event_paranoid __read_mostly = 1;
89687+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89688+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
89689+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
89690+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
89691+#else
89692+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
89693+#endif
89694
89695 /* Minimum for 512 kiB + 1 user control page */
89696 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
89697@@ -188,7 +195,7 @@ void update_perf_cpu_limits(void)
89698
89699 tmp *= sysctl_perf_cpu_time_max_percent;
89700 do_div(tmp, 100);
89701- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
89702+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
89703 }
89704
89705 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
89706@@ -294,7 +301,7 @@ void perf_sample_event_took(u64 sample_len_ns)
89707 }
89708 }
89709
89710-static atomic64_t perf_event_id;
89711+static atomic64_unchecked_t perf_event_id;
89712
89713 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
89714 enum event_type_t event_type);
89715@@ -3034,7 +3041,7 @@ static void __perf_event_read(void *info)
89716
89717 static inline u64 perf_event_count(struct perf_event *event)
89718 {
89719- return local64_read(&event->count) + atomic64_read(&event->child_count);
89720+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
89721 }
89722
89723 static u64 perf_event_read(struct perf_event *event)
89724@@ -3410,9 +3417,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
89725 mutex_lock(&event->child_mutex);
89726 total += perf_event_read(event);
89727 *enabled += event->total_time_enabled +
89728- atomic64_read(&event->child_total_time_enabled);
89729+ atomic64_read_unchecked(&event->child_total_time_enabled);
89730 *running += event->total_time_running +
89731- atomic64_read(&event->child_total_time_running);
89732+ atomic64_read_unchecked(&event->child_total_time_running);
89733
89734 list_for_each_entry(child, &event->child_list, child_list) {
89735 total += perf_event_read(child);
89736@@ -3861,10 +3868,10 @@ void perf_event_update_userpage(struct perf_event *event)
89737 userpg->offset -= local64_read(&event->hw.prev_count);
89738
89739 userpg->time_enabled = enabled +
89740- atomic64_read(&event->child_total_time_enabled);
89741+ atomic64_read_unchecked(&event->child_total_time_enabled);
89742
89743 userpg->time_running = running +
89744- atomic64_read(&event->child_total_time_running);
89745+ atomic64_read_unchecked(&event->child_total_time_running);
89746
89747 arch_perf_update_userpage(userpg, now);
89748
89749@@ -4428,7 +4435,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
89750
89751 /* Data. */
89752 sp = perf_user_stack_pointer(regs);
89753- rem = __output_copy_user(handle, (void *) sp, dump_size);
89754+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
89755 dyn_size = dump_size - rem;
89756
89757 perf_output_skip(handle, rem);
89758@@ -4519,11 +4526,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
89759 values[n++] = perf_event_count(event);
89760 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
89761 values[n++] = enabled +
89762- atomic64_read(&event->child_total_time_enabled);
89763+ atomic64_read_unchecked(&event->child_total_time_enabled);
89764 }
89765 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89766 values[n++] = running +
89767- atomic64_read(&event->child_total_time_running);
89768+ atomic64_read_unchecked(&event->child_total_time_running);
89769 }
89770 if (read_format & PERF_FORMAT_ID)
89771 values[n++] = primary_event_id(event);
89772@@ -6838,7 +6845,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89773 event->parent = parent_event;
89774
89775 event->ns = get_pid_ns(task_active_pid_ns(current));
89776- event->id = atomic64_inc_return(&perf_event_id);
89777+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89778
89779 event->state = PERF_EVENT_STATE_INACTIVE;
89780
89781@@ -7117,6 +7124,11 @@ SYSCALL_DEFINE5(perf_event_open,
89782 if (flags & ~PERF_FLAG_ALL)
89783 return -EINVAL;
89784
89785+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89786+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89787+ return -EACCES;
89788+#endif
89789+
89790 err = perf_copy_attr(attr_uptr, &attr);
89791 if (err)
89792 return err;
89793@@ -7469,10 +7481,10 @@ static void sync_child_event(struct perf_event *child_event,
89794 /*
89795 * Add back the child's count to the parent's count:
89796 */
89797- atomic64_add(child_val, &parent_event->child_count);
89798- atomic64_add(child_event->total_time_enabled,
89799+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89800+ atomic64_add_unchecked(child_event->total_time_enabled,
89801 &parent_event->child_total_time_enabled);
89802- atomic64_add(child_event->total_time_running,
89803+ atomic64_add_unchecked(child_event->total_time_running,
89804 &parent_event->child_total_time_running);
89805
89806 /*
89807diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89808index 569b2187..19940d9 100644
89809--- a/kernel/events/internal.h
89810+++ b/kernel/events/internal.h
89811@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89812 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89813 }
89814
89815-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89816+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89817 static inline unsigned long \
89818 func_name(struct perf_output_handle *handle, \
89819- const void *buf, unsigned long len) \
89820+ const void user *buf, unsigned long len) \
89821 { \
89822 unsigned long size, written; \
89823 \
89824@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89825 return 0;
89826 }
89827
89828-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89829+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89830
89831 static inline unsigned long
89832 memcpy_skip(void *dst, const void *src, unsigned long n)
89833@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89834 return 0;
89835 }
89836
89837-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89838+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89839
89840 #ifndef arch_perf_out_copy_user
89841 #define arch_perf_out_copy_user arch_perf_out_copy_user
89842@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89843 }
89844 #endif
89845
89846-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89847+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89848
89849 /* Callchain handling */
89850 extern struct perf_callchain_entry *
89851diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89852index 1d0af8a..9913530 100644
89853--- a/kernel/events/uprobes.c
89854+++ b/kernel/events/uprobes.c
89855@@ -1671,7 +1671,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89856 {
89857 struct page *page;
89858 uprobe_opcode_t opcode;
89859- int result;
89860+ long result;
89861
89862 pagefault_disable();
89863 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89864diff --git a/kernel/exit.c b/kernel/exit.c
89865index 32c58f7..9eb6907 100644
89866--- a/kernel/exit.c
89867+++ b/kernel/exit.c
89868@@ -173,6 +173,10 @@ void release_task(struct task_struct *p)
89869 struct task_struct *leader;
89870 int zap_leader;
89871 repeat:
89872+#ifdef CONFIG_NET
89873+ gr_del_task_from_ip_table(p);
89874+#endif
89875+
89876 /* don't need to get the RCU readlock here - the process is dead and
89877 * can't be modifying its own credentials. But shut RCU-lockdep up */
89878 rcu_read_lock();
89879@@ -668,6 +672,8 @@ void do_exit(long code)
89880 struct task_struct *tsk = current;
89881 int group_dead;
89882
89883+ set_fs(USER_DS);
89884+
89885 profile_task_exit(tsk);
89886
89887 WARN_ON(blk_needs_flush_plug(tsk));
89888@@ -684,7 +690,6 @@ void do_exit(long code)
89889 * mm_release()->clear_child_tid() from writing to a user-controlled
89890 * kernel address.
89891 */
89892- set_fs(USER_DS);
89893
89894 ptrace_event(PTRACE_EVENT_EXIT, code);
89895
89896@@ -742,6 +747,9 @@ void do_exit(long code)
89897 tsk->exit_code = code;
89898 taskstats_exit(tsk, group_dead);
89899
89900+ gr_acl_handle_psacct(tsk, code);
89901+ gr_acl_handle_exit();
89902+
89903 exit_mm(tsk);
89904
89905 if (group_dead)
89906@@ -859,7 +867,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89907 * Take down every thread in the group. This is called by fatal signals
89908 * as well as by sys_exit_group (below).
89909 */
89910-void
89911+__noreturn void
89912 do_group_exit(int exit_code)
89913 {
89914 struct signal_struct *sig = current->signal;
89915diff --git a/kernel/fork.c b/kernel/fork.c
89916index a91e47d..71c9064 100644
89917--- a/kernel/fork.c
89918+++ b/kernel/fork.c
89919@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
89920 # endif
89921 #endif
89922
89923+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89924+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89925+ int node, void **lowmem_stack)
89926+{
89927+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89928+ void *ret = NULL;
89929+ unsigned int i;
89930+
89931+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89932+ if (*lowmem_stack == NULL)
89933+ goto out;
89934+
89935+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89936+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89937+
89938+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89939+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89940+ if (ret == NULL) {
89941+ free_thread_info(*lowmem_stack);
89942+ *lowmem_stack = NULL;
89943+ }
89944+
89945+out:
89946+ return ret;
89947+}
89948+
89949+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89950+{
89951+ unmap_process_stacks(tsk);
89952+}
89953+#else
89954+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89955+ int node, void **lowmem_stack)
89956+{
89957+ return alloc_thread_info_node(tsk, node);
89958+}
89959+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89960+{
89961+ free_thread_info(ti);
89962+}
89963+#endif
89964+
89965 /* SLAB cache for signal_struct structures (tsk->signal) */
89966 static struct kmem_cache *signal_cachep;
89967
89968@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89969 /* SLAB cache for mm_struct structures (tsk->mm) */
89970 static struct kmem_cache *mm_cachep;
89971
89972-static void account_kernel_stack(struct thread_info *ti, int account)
89973+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89974 {
89975+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89976+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89977+#else
89978 struct zone *zone = page_zone(virt_to_page(ti));
89979+#endif
89980
89981 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89982 }
89983
89984 void free_task(struct task_struct *tsk)
89985 {
89986- account_kernel_stack(tsk->stack, -1);
89987+ account_kernel_stack(tsk, tsk->stack, -1);
89988 arch_release_thread_info(tsk->stack);
89989- free_thread_info(tsk->stack);
89990+ gr_free_thread_info(tsk, tsk->stack);
89991 rt_mutex_debug_task_free(tsk);
89992 ftrace_graph_exit_task(tsk);
89993 put_seccomp_filter(tsk);
89994@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89995 struct task_struct *tsk;
89996 struct thread_info *ti;
89997 unsigned long *stackend;
89998+ void *lowmem_stack;
89999 int node = tsk_fork_get_node(orig);
90000 int err;
90001
90002@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90003 if (!tsk)
90004 return NULL;
90005
90006- ti = alloc_thread_info_node(tsk, node);
90007+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
90008 if (!ti)
90009 goto free_tsk;
90010
90011@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90012 goto free_ti;
90013
90014 tsk->stack = ti;
90015+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
90016+ tsk->lowmem_stack = lowmem_stack;
90017+#endif
90018 #ifdef CONFIG_SECCOMP
90019 /*
90020 * We must handle setting up seccomp filters once we're under
90021@@ -332,7 +382,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90022 *stackend = STACK_END_MAGIC; /* for overflow detection */
90023
90024 #ifdef CONFIG_CC_STACKPROTECTOR
90025- tsk->stack_canary = get_random_int();
90026+ tsk->stack_canary = pax_get_random_long();
90027 #endif
90028
90029 /*
90030@@ -346,24 +396,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90031 tsk->splice_pipe = NULL;
90032 tsk->task_frag.page = NULL;
90033
90034- account_kernel_stack(ti, 1);
90035+ account_kernel_stack(tsk, ti, 1);
90036
90037 return tsk;
90038
90039 free_ti:
90040- free_thread_info(ti);
90041+ gr_free_thread_info(tsk, ti);
90042 free_tsk:
90043 free_task_struct(tsk);
90044 return NULL;
90045 }
90046
90047 #ifdef CONFIG_MMU
90048-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90049+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
90050+{
90051+ struct vm_area_struct *tmp;
90052+ unsigned long charge;
90053+ struct file *file;
90054+ int retval;
90055+
90056+ charge = 0;
90057+ if (mpnt->vm_flags & VM_ACCOUNT) {
90058+ unsigned long len = vma_pages(mpnt);
90059+
90060+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90061+ goto fail_nomem;
90062+ charge = len;
90063+ }
90064+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90065+ if (!tmp)
90066+ goto fail_nomem;
90067+ *tmp = *mpnt;
90068+ tmp->vm_mm = mm;
90069+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
90070+ retval = vma_dup_policy(mpnt, tmp);
90071+ if (retval)
90072+ goto fail_nomem_policy;
90073+ if (anon_vma_fork(tmp, mpnt))
90074+ goto fail_nomem_anon_vma_fork;
90075+ tmp->vm_flags &= ~VM_LOCKED;
90076+ tmp->vm_next = tmp->vm_prev = NULL;
90077+ tmp->vm_mirror = NULL;
90078+ file = tmp->vm_file;
90079+ if (file) {
90080+ struct inode *inode = file_inode(file);
90081+ struct address_space *mapping = file->f_mapping;
90082+
90083+ get_file(file);
90084+ if (tmp->vm_flags & VM_DENYWRITE)
90085+ atomic_dec(&inode->i_writecount);
90086+ mutex_lock(&mapping->i_mmap_mutex);
90087+ if (tmp->vm_flags & VM_SHARED)
90088+ atomic_inc(&mapping->i_mmap_writable);
90089+ flush_dcache_mmap_lock(mapping);
90090+ /* insert tmp into the share list, just after mpnt */
90091+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90092+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
90093+ else
90094+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
90095+ flush_dcache_mmap_unlock(mapping);
90096+ mutex_unlock(&mapping->i_mmap_mutex);
90097+ }
90098+
90099+ /*
90100+ * Clear hugetlb-related page reserves for children. This only
90101+ * affects MAP_PRIVATE mappings. Faults generated by the child
90102+ * are not guaranteed to succeed, even if read-only
90103+ */
90104+ if (is_vm_hugetlb_page(tmp))
90105+ reset_vma_resv_huge_pages(tmp);
90106+
90107+ return tmp;
90108+
90109+fail_nomem_anon_vma_fork:
90110+ mpol_put(vma_policy(tmp));
90111+fail_nomem_policy:
90112+ kmem_cache_free(vm_area_cachep, tmp);
90113+fail_nomem:
90114+ vm_unacct_memory(charge);
90115+ return NULL;
90116+}
90117+
90118+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90119 {
90120 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
90121 struct rb_node **rb_link, *rb_parent;
90122 int retval;
90123- unsigned long charge;
90124
90125 uprobe_start_dup_mmap();
90126 down_write(&oldmm->mmap_sem);
90127@@ -391,55 +509,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90128
90129 prev = NULL;
90130 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
90131- struct file *file;
90132-
90133 if (mpnt->vm_flags & VM_DONTCOPY) {
90134 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
90135 -vma_pages(mpnt));
90136 continue;
90137 }
90138- charge = 0;
90139- if (mpnt->vm_flags & VM_ACCOUNT) {
90140- unsigned long len = vma_pages(mpnt);
90141-
90142- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
90143- goto fail_nomem;
90144- charge = len;
90145- }
90146- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90147- if (!tmp)
90148- goto fail_nomem;
90149- *tmp = *mpnt;
90150- INIT_LIST_HEAD(&tmp->anon_vma_chain);
90151- retval = vma_dup_policy(mpnt, tmp);
90152- if (retval)
90153- goto fail_nomem_policy;
90154- tmp->vm_mm = mm;
90155- if (anon_vma_fork(tmp, mpnt))
90156- goto fail_nomem_anon_vma_fork;
90157- tmp->vm_flags &= ~VM_LOCKED;
90158- tmp->vm_next = tmp->vm_prev = NULL;
90159- file = tmp->vm_file;
90160- if (file) {
90161- struct inode *inode = file_inode(file);
90162- struct address_space *mapping = file->f_mapping;
90163-
90164- get_file(file);
90165- if (tmp->vm_flags & VM_DENYWRITE)
90166- atomic_dec(&inode->i_writecount);
90167- mutex_lock(&mapping->i_mmap_mutex);
90168- if (tmp->vm_flags & VM_SHARED)
90169- atomic_inc(&mapping->i_mmap_writable);
90170- flush_dcache_mmap_lock(mapping);
90171- /* insert tmp into the share list, just after mpnt */
90172- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
90173- vma_nonlinear_insert(tmp,
90174- &mapping->i_mmap_nonlinear);
90175- else
90176- vma_interval_tree_insert_after(tmp, mpnt,
90177- &mapping->i_mmap);
90178- flush_dcache_mmap_unlock(mapping);
90179- mutex_unlock(&mapping->i_mmap_mutex);
90180+ tmp = dup_vma(mm, oldmm, mpnt);
90181+ if (!tmp) {
90182+ retval = -ENOMEM;
90183+ goto out;
90184 }
90185
90186 /*
90187@@ -471,6 +549,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90188 if (retval)
90189 goto out;
90190 }
90191+
90192+#ifdef CONFIG_PAX_SEGMEXEC
90193+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90194+ struct vm_area_struct *mpnt_m;
90195+
90196+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90197+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90198+
90199+ if (!mpnt->vm_mirror)
90200+ continue;
90201+
90202+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90203+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90204+ mpnt->vm_mirror = mpnt_m;
90205+ } else {
90206+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90207+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90208+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90209+ mpnt->vm_mirror->vm_mirror = mpnt;
90210+ }
90211+ }
90212+ BUG_ON(mpnt_m);
90213+ }
90214+#endif
90215+
90216 /* a new mm has just been created */
90217 arch_dup_mmap(oldmm, mm);
90218 retval = 0;
90219@@ -480,14 +583,6 @@ out:
90220 up_write(&oldmm->mmap_sem);
90221 uprobe_end_dup_mmap();
90222 return retval;
90223-fail_nomem_anon_vma_fork:
90224- mpol_put(vma_policy(tmp));
90225-fail_nomem_policy:
90226- kmem_cache_free(vm_area_cachep, tmp);
90227-fail_nomem:
90228- retval = -ENOMEM;
90229- vm_unacct_memory(charge);
90230- goto out;
90231 }
90232
90233 static inline int mm_alloc_pgd(struct mm_struct *mm)
90234@@ -729,8 +824,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
90235 return ERR_PTR(err);
90236
90237 mm = get_task_mm(task);
90238- if (mm && mm != current->mm &&
90239- !ptrace_may_access(task, mode)) {
90240+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
90241+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
90242 mmput(mm);
90243 mm = ERR_PTR(-EACCES);
90244 }
90245@@ -933,13 +1028,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90246 spin_unlock(&fs->lock);
90247 return -EAGAIN;
90248 }
90249- fs->users++;
90250+ atomic_inc(&fs->users);
90251 spin_unlock(&fs->lock);
90252 return 0;
90253 }
90254 tsk->fs = copy_fs_struct(fs);
90255 if (!tsk->fs)
90256 return -ENOMEM;
90257+ /* Carry through gr_chroot_dentry and is_chrooted instead
90258+ of recomputing it here. Already copied when the task struct
90259+ is duplicated. This allows pivot_root to not be treated as
90260+ a chroot
90261+ */
90262+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
90263+
90264 return 0;
90265 }
90266
90267@@ -1173,7 +1275,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
90268 * parts of the process environment (as per the clone
90269 * flags). The actual kick-off is left to the caller.
90270 */
90271-static struct task_struct *copy_process(unsigned long clone_flags,
90272+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
90273 unsigned long stack_start,
90274 unsigned long stack_size,
90275 int __user *child_tidptr,
90276@@ -1244,6 +1346,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90277 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
90278 #endif
90279 retval = -EAGAIN;
90280+
90281+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
90282+
90283 if (atomic_read(&p->real_cred->user->processes) >=
90284 task_rlimit(p, RLIMIT_NPROC)) {
90285 if (p->real_cred->user != INIT_USER &&
90286@@ -1493,6 +1598,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90287 goto bad_fork_free_pid;
90288 }
90289
90290+ /* synchronizes with gr_set_acls()
90291+ we need to call this past the point of no return for fork()
90292+ */
90293+ gr_copy_label(p);
90294+
90295 if (likely(p->pid)) {
90296 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
90297
90298@@ -1583,6 +1693,8 @@ bad_fork_cleanup_count:
90299 bad_fork_free:
90300 free_task(p);
90301 fork_out:
90302+ gr_log_forkfail(retval);
90303+
90304 return ERR_PTR(retval);
90305 }
90306
90307@@ -1644,6 +1756,7 @@ long do_fork(unsigned long clone_flags,
90308
90309 p = copy_process(clone_flags, stack_start, stack_size,
90310 child_tidptr, NULL, trace);
90311+ add_latent_entropy();
90312 /*
90313 * Do this prior waking up the new thread - the thread pointer
90314 * might get invalid after that point, if the thread exits quickly.
90315@@ -1660,6 +1773,8 @@ long do_fork(unsigned long clone_flags,
90316 if (clone_flags & CLONE_PARENT_SETTID)
90317 put_user(nr, parent_tidptr);
90318
90319+ gr_handle_brute_check();
90320+
90321 if (clone_flags & CLONE_VFORK) {
90322 p->vfork_done = &vfork;
90323 init_completion(&vfork);
90324@@ -1778,7 +1893,7 @@ void __init proc_caches_init(void)
90325 mm_cachep = kmem_cache_create("mm_struct",
90326 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
90327 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
90328- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
90329+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
90330 mmap_init();
90331 nsproxy_cache_init();
90332 }
90333@@ -1818,7 +1933,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
90334 return 0;
90335
90336 /* don't need lock here; in the worst case we'll do useless copy */
90337- if (fs->users == 1)
90338+ if (atomic_read(&fs->users) == 1)
90339 return 0;
90340
90341 *new_fsp = copy_fs_struct(fs);
90342@@ -1930,7 +2045,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
90343 fs = current->fs;
90344 spin_lock(&fs->lock);
90345 current->fs = new_fs;
90346- if (--fs->users)
90347+ gr_set_chroot_entries(current, &current->fs->root);
90348+ if (atomic_dec_return(&fs->users))
90349 new_fs = NULL;
90350 else
90351 new_fs = fs;
90352diff --git a/kernel/futex.c b/kernel/futex.c
90353index f3a3a07..6820bc0 100644
90354--- a/kernel/futex.c
90355+++ b/kernel/futex.c
90356@@ -202,7 +202,7 @@ struct futex_pi_state {
90357 atomic_t refcount;
90358
90359 union futex_key key;
90360-};
90361+} __randomize_layout;
90362
90363 /**
90364 * struct futex_q - The hashed futex queue entry, one per waiting task
90365@@ -236,7 +236,7 @@ struct futex_q {
90366 struct rt_mutex_waiter *rt_waiter;
90367 union futex_key *requeue_pi_key;
90368 u32 bitset;
90369-};
90370+} __randomize_layout;
90371
90372 static const struct futex_q futex_q_init = {
90373 /* list gets initialized in queue_me()*/
90374@@ -396,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
90375 struct page *page, *page_head;
90376 int err, ro = 0;
90377
90378+#ifdef CONFIG_PAX_SEGMEXEC
90379+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
90380+ return -EFAULT;
90381+#endif
90382+
90383 /*
90384 * The futex address must be "naturally" aligned.
90385 */
90386@@ -595,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
90387
90388 static int get_futex_value_locked(u32 *dest, u32 __user *from)
90389 {
90390- int ret;
90391+ unsigned long ret;
90392
90393 pagefault_disable();
90394 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
90395@@ -641,8 +646,14 @@ static struct futex_pi_state * alloc_pi_state(void)
90396 return pi_state;
90397 }
90398
90399+/*
90400+ * Must be called with the hb lock held.
90401+ */
90402 static void free_pi_state(struct futex_pi_state *pi_state)
90403 {
90404+ if (!pi_state)
90405+ return;
90406+
90407 if (!atomic_dec_and_test(&pi_state->refcount))
90408 return;
90409
90410@@ -1521,15 +1532,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
90411 }
90412
90413 retry:
90414- if (pi_state != NULL) {
90415- /*
90416- * We will have to lookup the pi_state again, so free this one
90417- * to keep the accounting correct.
90418- */
90419- free_pi_state(pi_state);
90420- pi_state = NULL;
90421- }
90422-
90423 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
90424 if (unlikely(ret != 0))
90425 goto out;
90426@@ -1619,6 +1621,8 @@ retry_private:
90427 case 0:
90428 break;
90429 case -EFAULT:
90430+ free_pi_state(pi_state);
90431+ pi_state = NULL;
90432 double_unlock_hb(hb1, hb2);
90433 hb_waiters_dec(hb2);
90434 put_futex_key(&key2);
90435@@ -1634,6 +1638,8 @@ retry_private:
90436 * exit to complete.
90437 * - The user space value changed.
90438 */
90439+ free_pi_state(pi_state);
90440+ pi_state = NULL;
90441 double_unlock_hb(hb1, hb2);
90442 hb_waiters_dec(hb2);
90443 put_futex_key(&key2);
90444@@ -1710,6 +1716,7 @@ retry_private:
90445 }
90446
90447 out_unlock:
90448+ free_pi_state(pi_state);
90449 double_unlock_hb(hb1, hb2);
90450 hb_waiters_dec(hb2);
90451
90452@@ -1727,8 +1734,6 @@ out_put_keys:
90453 out_put_key1:
90454 put_futex_key(&key1);
90455 out:
90456- if (pi_state != NULL)
90457- free_pi_state(pi_state);
90458 return ret ? ret : task_count;
90459 }
90460
90461@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
90462 {
90463 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
90464 u32 curval;
90465+ mm_segment_t oldfs;
90466
90467 /*
90468 * This will fail and we want it. Some arch implementations do
90469@@ -3011,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
90470 * implementation, the non-functional ones will return
90471 * -ENOSYS.
90472 */
90473+ oldfs = get_fs();
90474+ set_fs(USER_DS);
90475 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
90476 futex_cmpxchg_enabled = 1;
90477+ set_fs(oldfs);
90478 #endif
90479 }
90480
90481diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90482index 55c8c93..9ba7ad6 100644
90483--- a/kernel/futex_compat.c
90484+++ b/kernel/futex_compat.c
90485@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
90486 return 0;
90487 }
90488
90489-static void __user *futex_uaddr(struct robust_list __user *entry,
90490+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
90491 compat_long_t futex_offset)
90492 {
90493 compat_uptr_t base = ptr_to_compat(entry);
90494diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90495index b358a80..fc25240 100644
90496--- a/kernel/gcov/base.c
90497+++ b/kernel/gcov/base.c
90498@@ -114,11 +114,6 @@ void gcov_enable_events(void)
90499 }
90500
90501 #ifdef CONFIG_MODULES
90502-static inline int within(void *addr, void *start, unsigned long size)
90503-{
90504- return ((addr >= start) && (addr < start + size));
90505-}
90506-
90507 /* Update list and generate events when modules are unloaded. */
90508 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90509 void *data)
90510@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90511
90512 /* Remove entries located in module from linked list. */
90513 while ((info = gcov_info_next(info))) {
90514- if (within(info, mod->module_core, mod->core_size)) {
90515+ if (within_module_core_rw((unsigned long)info, mod)) {
90516 gcov_info_unlink(prev, info);
90517 if (gcov_events_enabled)
90518 gcov_event(GCOV_REMOVE, info);
90519diff --git a/kernel/jump_label.c b/kernel/jump_label.c
90520index 9019f15..9a3c42e 100644
90521--- a/kernel/jump_label.c
90522+++ b/kernel/jump_label.c
90523@@ -14,6 +14,7 @@
90524 #include <linux/err.h>
90525 #include <linux/static_key.h>
90526 #include <linux/jump_label_ratelimit.h>
90527+#include <linux/mm.h>
90528
90529 #ifdef HAVE_JUMP_LABEL
90530
90531@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
90532
90533 size = (((unsigned long)stop - (unsigned long)start)
90534 / sizeof(struct jump_entry));
90535+ pax_open_kernel();
90536 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
90537+ pax_close_kernel();
90538 }
90539
90540 static void jump_label_update(struct static_key *key, int enable);
90541@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
90542 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
90543 struct jump_entry *iter;
90544
90545+ pax_open_kernel();
90546 for (iter = iter_start; iter < iter_stop; iter++) {
90547 if (within_module_init(iter->code, mod))
90548 iter->code = 0;
90549 }
90550+ pax_close_kernel();
90551 }
90552
90553 static int
90554diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
90555index ae51670..c1a9796 100644
90556--- a/kernel/kallsyms.c
90557+++ b/kernel/kallsyms.c
90558@@ -11,6 +11,9 @@
90559 * Changed the compression method from stem compression to "table lookup"
90560 * compression (see scripts/kallsyms.c for a more complete description)
90561 */
90562+#ifdef CONFIG_GRKERNSEC_HIDESYM
90563+#define __INCLUDED_BY_HIDESYM 1
90564+#endif
90565 #include <linux/kallsyms.h>
90566 #include <linux/module.h>
90567 #include <linux/init.h>
90568@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
90569
90570 static inline int is_kernel_inittext(unsigned long addr)
90571 {
90572+ if (system_state != SYSTEM_BOOTING)
90573+ return 0;
90574+
90575 if (addr >= (unsigned long)_sinittext
90576 && addr <= (unsigned long)_einittext)
90577 return 1;
90578 return 0;
90579 }
90580
90581+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90582+#ifdef CONFIG_MODULES
90583+static inline int is_module_text(unsigned long addr)
90584+{
90585+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
90586+ return 1;
90587+
90588+ addr = ktla_ktva(addr);
90589+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
90590+}
90591+#else
90592+static inline int is_module_text(unsigned long addr)
90593+{
90594+ return 0;
90595+}
90596+#endif
90597+#endif
90598+
90599 static inline int is_kernel_text(unsigned long addr)
90600 {
90601 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
90602@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
90603
90604 static inline int is_kernel(unsigned long addr)
90605 {
90606+
90607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90608+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
90609+ return 1;
90610+
90611+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
90612+#else
90613 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
90614+#endif
90615+
90616 return 1;
90617 return in_gate_area_no_mm(addr);
90618 }
90619
90620 static int is_ksym_addr(unsigned long addr)
90621 {
90622+
90623+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90624+ if (is_module_text(addr))
90625+ return 0;
90626+#endif
90627+
90628 if (all_var)
90629 return is_kernel(addr);
90630
90631@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
90632
90633 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
90634 {
90635- iter->name[0] = '\0';
90636 iter->nameoff = get_symbol_offset(new_pos);
90637 iter->pos = new_pos;
90638 }
90639@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
90640 {
90641 struct kallsym_iter *iter = m->private;
90642
90643+#ifdef CONFIG_GRKERNSEC_HIDESYM
90644+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
90645+ return 0;
90646+#endif
90647+
90648 /* Some debugging symbols have no name. Ignore them. */
90649 if (!iter->name[0])
90650 return 0;
90651@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
90652 */
90653 type = iter->exported ? toupper(iter->type) :
90654 tolower(iter->type);
90655+
90656 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
90657 type, iter->name, iter->module_name);
90658 } else
90659@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
90660 struct kallsym_iter *iter;
90661 int ret;
90662
90663- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
90664+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
90665 if (!iter)
90666 return -ENOMEM;
90667 reset_iter(iter, 0);
90668diff --git a/kernel/kcmp.c b/kernel/kcmp.c
90669index 0aa69ea..a7fcafb 100644
90670--- a/kernel/kcmp.c
90671+++ b/kernel/kcmp.c
90672@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
90673 struct task_struct *task1, *task2;
90674 int ret;
90675
90676+#ifdef CONFIG_GRKERNSEC
90677+ return -ENOSYS;
90678+#endif
90679+
90680 rcu_read_lock();
90681
90682 /*
90683diff --git a/kernel/kexec.c b/kernel/kexec.c
90684index 2bee072..8979af8 100644
90685--- a/kernel/kexec.c
90686+++ b/kernel/kexec.c
90687@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
90688 compat_ulong_t, flags)
90689 {
90690 struct compat_kexec_segment in;
90691- struct kexec_segment out, __user *ksegments;
90692+ struct kexec_segment out;
90693+ struct kexec_segment __user *ksegments;
90694 unsigned long i, result;
90695
90696 /* Don't allow clients that don't understand the native
90697diff --git a/kernel/kmod.c b/kernel/kmod.c
90698index 8637e04..8b1d0d8 100644
90699--- a/kernel/kmod.c
90700+++ b/kernel/kmod.c
90701@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
90702 kfree(info->argv);
90703 }
90704
90705-static int call_modprobe(char *module_name, int wait)
90706+static int call_modprobe(char *module_name, char *module_param, int wait)
90707 {
90708 struct subprocess_info *info;
90709 static char *envp[] = {
90710@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
90711 NULL
90712 };
90713
90714- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
90715+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
90716 if (!argv)
90717 goto out;
90718
90719@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
90720 argv[1] = "-q";
90721 argv[2] = "--";
90722 argv[3] = module_name; /* check free_modprobe_argv() */
90723- argv[4] = NULL;
90724+ argv[4] = module_param;
90725+ argv[5] = NULL;
90726
90727 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
90728 NULL, free_modprobe_argv, NULL);
90729@@ -129,9 +130,8 @@ out:
90730 * If module auto-loading support is disabled then this function
90731 * becomes a no-operation.
90732 */
90733-int __request_module(bool wait, const char *fmt, ...)
90734+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
90735 {
90736- va_list args;
90737 char module_name[MODULE_NAME_LEN];
90738 unsigned int max_modprobes;
90739 int ret;
90740@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
90741 if (!modprobe_path[0])
90742 return 0;
90743
90744- va_start(args, fmt);
90745- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
90746- va_end(args);
90747+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
90748 if (ret >= MODULE_NAME_LEN)
90749 return -ENAMETOOLONG;
90750
90751@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
90752 if (ret)
90753 return ret;
90754
90755+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90756+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90757+ /* hack to workaround consolekit/udisks stupidity */
90758+ read_lock(&tasklist_lock);
90759+ if (!strcmp(current->comm, "mount") &&
90760+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
90761+ read_unlock(&tasklist_lock);
90762+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
90763+ return -EPERM;
90764+ }
90765+ read_unlock(&tasklist_lock);
90766+ }
90767+#endif
90768+
90769 /* If modprobe needs a service that is in a module, we get a recursive
90770 * loop. Limit the number of running kmod threads to max_threads/2 or
90771 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
90772@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
90773
90774 trace_module_request(module_name, wait, _RET_IP_);
90775
90776- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90777+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90778
90779 atomic_dec(&kmod_concurrent);
90780 return ret;
90781 }
90782+
90783+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
90784+{
90785+ va_list args;
90786+ int ret;
90787+
90788+ va_start(args, fmt);
90789+ ret = ____request_module(wait, module_param, fmt, args);
90790+ va_end(args);
90791+
90792+ return ret;
90793+}
90794+
90795+int __request_module(bool wait, const char *fmt, ...)
90796+{
90797+ va_list args;
90798+ int ret;
90799+
90800+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90801+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90802+ char module_param[MODULE_NAME_LEN];
90803+
90804+ memset(module_param, 0, sizeof(module_param));
90805+
90806+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
90807+
90808+ va_start(args, fmt);
90809+ ret = ____request_module(wait, module_param, fmt, args);
90810+ va_end(args);
90811+
90812+ return ret;
90813+ }
90814+#endif
90815+
90816+ va_start(args, fmt);
90817+ ret = ____request_module(wait, NULL, fmt, args);
90818+ va_end(args);
90819+
90820+ return ret;
90821+}
90822+
90823 EXPORT_SYMBOL(__request_module);
90824 #endif /* CONFIG_MODULES */
90825
90826@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
90827 */
90828 set_user_nice(current, 0);
90829
90830+#ifdef CONFIG_GRKERNSEC
90831+ /* this is race-free as far as userland is concerned as we copied
90832+ out the path to be used prior to this point and are now operating
90833+ on that copy
90834+ */
90835+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90836+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90837+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90838+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
90839+ retval = -EPERM;
90840+ goto fail;
90841+ }
90842+#endif
90843+
90844 retval = -ENOMEM;
90845 new = prepare_kernel_cred(current);
90846 if (!new)
90847@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
90848 commit_creds(new);
90849
90850 retval = do_execve(getname_kernel(sub_info->path),
90851- (const char __user *const __user *)sub_info->argv,
90852- (const char __user *const __user *)sub_info->envp);
90853+ (const char __user *const __force_user *)sub_info->argv,
90854+ (const char __user *const __force_user *)sub_info->envp);
90855 if (!retval)
90856 return 0;
90857
90858@@ -260,6 +327,10 @@ static int call_helper(void *data)
90859
90860 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90861 {
90862+#ifdef CONFIG_GRKERNSEC
90863+ kfree(info->path);
90864+ info->path = info->origpath;
90865+#endif
90866 if (info->cleanup)
90867 (*info->cleanup)(info);
90868 kfree(info);
90869@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
90870 *
90871 * Thus the __user pointer cast is valid here.
90872 */
90873- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90874+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90875
90876 /*
90877 * If ret is 0, either ____call_usermodehelper failed and the
90878@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90879 goto out;
90880
90881 INIT_WORK(&sub_info->work, __call_usermodehelper);
90882+#ifdef CONFIG_GRKERNSEC
90883+ sub_info->origpath = path;
90884+ sub_info->path = kstrdup(path, gfp_mask);
90885+#else
90886 sub_info->path = path;
90887+#endif
90888 sub_info->argv = argv;
90889 sub_info->envp = envp;
90890
90891@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90892 static int proc_cap_handler(struct ctl_table *table, int write,
90893 void __user *buffer, size_t *lenp, loff_t *ppos)
90894 {
90895- struct ctl_table t;
90896+ ctl_table_no_const t;
90897 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90898 kernel_cap_t new_cap;
90899 int err, i;
90900diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90901index 3995f54..e247879 100644
90902--- a/kernel/kprobes.c
90903+++ b/kernel/kprobes.c
90904@@ -31,6 +31,9 @@
90905 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90906 * <prasanna@in.ibm.com> added function-return probes.
90907 */
90908+#ifdef CONFIG_GRKERNSEC_HIDESYM
90909+#define __INCLUDED_BY_HIDESYM 1
90910+#endif
90911 #include <linux/kprobes.h>
90912 #include <linux/hash.h>
90913 #include <linux/init.h>
90914@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90915
90916 static void *alloc_insn_page(void)
90917 {
90918- return module_alloc(PAGE_SIZE);
90919+ return module_alloc_exec(PAGE_SIZE);
90920 }
90921
90922 static void free_insn_page(void *page)
90923 {
90924- module_free(NULL, page);
90925+ module_free_exec(NULL, page);
90926 }
90927
90928 struct kprobe_insn_cache kprobe_insn_slots = {
90929@@ -2187,11 +2190,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90930 kprobe_type = "k";
90931
90932 if (sym)
90933- seq_printf(pi, "%p %s %s+0x%x %s ",
90934+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90935 p->addr, kprobe_type, sym, offset,
90936 (modname ? modname : " "));
90937 else
90938- seq_printf(pi, "%p %s %p ",
90939+ seq_printf(pi, "%pK %s %pK ",
90940 p->addr, kprobe_type, p->addr);
90941
90942 if (!pp)
90943diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90944index 6683cce..daf8999 100644
90945--- a/kernel/ksysfs.c
90946+++ b/kernel/ksysfs.c
90947@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90948 {
90949 if (count+1 > UEVENT_HELPER_PATH_LEN)
90950 return -ENOENT;
90951+ if (!capable(CAP_SYS_ADMIN))
90952+ return -EPERM;
90953 memcpy(uevent_helper, buf, count);
90954 uevent_helper[count] = '\0';
90955 if (count && uevent_helper[count-1] == '\n')
90956@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90957 return count;
90958 }
90959
90960-static struct bin_attribute notes_attr = {
90961+static bin_attribute_no_const notes_attr __read_only = {
90962 .attr = {
90963 .name = "notes",
90964 .mode = S_IRUGO,
90965diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90966index 88d0d44..e9ce0ee 100644
90967--- a/kernel/locking/lockdep.c
90968+++ b/kernel/locking/lockdep.c
90969@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90970 end = (unsigned long) &_end,
90971 addr = (unsigned long) obj;
90972
90973+#ifdef CONFIG_PAX_KERNEXEC
90974+ start = ktla_ktva(start);
90975+#endif
90976+
90977 /*
90978 * static variable?
90979 */
90980@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90981 if (!static_obj(lock->key)) {
90982 debug_locks_off();
90983 printk("INFO: trying to register non-static key.\n");
90984+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90985 printk("the code is fine but needs lockdep annotation.\n");
90986 printk("turning off the locking correctness validator.\n");
90987 dump_stack();
90988@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90989 if (!class)
90990 return 0;
90991 }
90992- atomic_inc((atomic_t *)&class->ops);
90993+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90994 if (very_verbose(class)) {
90995 printk("\nacquire class [%p] %s", class->key, class->name);
90996 if (class->name_version > 1)
90997diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90998index ef43ac4..2720dfa 100644
90999--- a/kernel/locking/lockdep_proc.c
91000+++ b/kernel/locking/lockdep_proc.c
91001@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
91002 return 0;
91003 }
91004
91005- seq_printf(m, "%p", class->key);
91006+ seq_printf(m, "%pK", class->key);
91007 #ifdef CONFIG_DEBUG_LOCKDEP
91008 seq_printf(m, " OPS:%8ld", class->ops);
91009 #endif
91010@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
91011
91012 list_for_each_entry(entry, &class->locks_after, entry) {
91013 if (entry->distance == 1) {
91014- seq_printf(m, " -> [%p] ", entry->class->key);
91015+ seq_printf(m, " -> [%pK] ", entry->class->key);
91016 print_name(m, entry->class);
91017 seq_puts(m, "\n");
91018 }
91019@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
91020 if (!class->key)
91021 continue;
91022
91023- seq_printf(m, "[%p] ", class->key);
91024+ seq_printf(m, "[%pK] ", class->key);
91025 print_name(m, class);
91026 seq_puts(m, "\n");
91027 }
91028@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91029 if (!i)
91030 seq_line(m, '-', 40-namelen, namelen);
91031
91032- snprintf(ip, sizeof(ip), "[<%p>]",
91033+ snprintf(ip, sizeof(ip), "[<%pK>]",
91034 (void *)class->contention_point[i]);
91035 seq_printf(m, "%40s %14lu %29s %pS\n",
91036 name, stats->contention_point[i],
91037@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
91038 if (!i)
91039 seq_line(m, '-', 40-namelen, namelen);
91040
91041- snprintf(ip, sizeof(ip), "[<%p>]",
91042+ snprintf(ip, sizeof(ip), "[<%pK>]",
91043 (void *)class->contending_point[i]);
91044 seq_printf(m, "%40s %14lu %29s %pS\n",
91045 name, stats->contending_point[i],
91046diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
91047index 9887a90..0cd2b1d 100644
91048--- a/kernel/locking/mcs_spinlock.c
91049+++ b/kernel/locking/mcs_spinlock.c
91050@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
91051
91052 prev = decode_cpu(old);
91053 node->prev = prev;
91054- ACCESS_ONCE(prev->next) = node;
91055+ ACCESS_ONCE_RW(prev->next) = node;
91056
91057 /*
91058 * Normally @prev is untouchable after the above store; because at that
91059@@ -172,8 +172,8 @@ unqueue:
91060 * it will wait in Step-A.
91061 */
91062
91063- ACCESS_ONCE(next->prev) = prev;
91064- ACCESS_ONCE(prev->next) = next;
91065+ ACCESS_ONCE_RW(next->prev) = prev;
91066+ ACCESS_ONCE_RW(prev->next) = next;
91067
91068 return false;
91069 }
91070@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
91071 node = this_cpu_ptr(&osq_node);
91072 next = xchg(&node->next, NULL);
91073 if (next) {
91074- ACCESS_ONCE(next->locked) = 1;
91075+ ACCESS_ONCE_RW(next->locked) = 1;
91076 return;
91077 }
91078
91079 next = osq_wait_next(lock, node, NULL);
91080 if (next)
91081- ACCESS_ONCE(next->locked) = 1;
91082+ ACCESS_ONCE_RW(next->locked) = 1;
91083 }
91084
91085 #endif
91086diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
91087index 23e89c5..8558eac 100644
91088--- a/kernel/locking/mcs_spinlock.h
91089+++ b/kernel/locking/mcs_spinlock.h
91090@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
91091 */
91092 return;
91093 }
91094- ACCESS_ONCE(prev->next) = node;
91095+ ACCESS_ONCE_RW(prev->next) = node;
91096
91097 /* Wait until the lock holder passes the lock down. */
91098 arch_mcs_spin_lock_contended(&node->locked);
91099diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
91100index 5cf6731..ce3bc5a 100644
91101--- a/kernel/locking/mutex-debug.c
91102+++ b/kernel/locking/mutex-debug.c
91103@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91104 }
91105
91106 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91107- struct thread_info *ti)
91108+ struct task_struct *task)
91109 {
91110 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91111
91112 /* Mark the current thread as blocked on the lock: */
91113- ti->task->blocked_on = waiter;
91114+ task->blocked_on = waiter;
91115 }
91116
91117 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91118- struct thread_info *ti)
91119+ struct task_struct *task)
91120 {
91121 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91122- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91123- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91124- ti->task->blocked_on = NULL;
91125+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91126+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91127+ task->blocked_on = NULL;
91128
91129 list_del_init(&waiter->list);
91130 waiter->task = NULL;
91131diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
91132index 0799fd3..d06ae3b 100644
91133--- a/kernel/locking/mutex-debug.h
91134+++ b/kernel/locking/mutex-debug.h
91135@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
91136 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
91137 extern void debug_mutex_add_waiter(struct mutex *lock,
91138 struct mutex_waiter *waiter,
91139- struct thread_info *ti);
91140+ struct task_struct *task);
91141 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91142- struct thread_info *ti);
91143+ struct task_struct *task);
91144 extern void debug_mutex_unlock(struct mutex *lock);
91145 extern void debug_mutex_init(struct mutex *lock, const char *name,
91146 struct lock_class_key *key);
91147diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
91148index ae712b2..d0d4a41 100644
91149--- a/kernel/locking/mutex.c
91150+++ b/kernel/locking/mutex.c
91151@@ -486,7 +486,7 @@ slowpath:
91152 goto skip_wait;
91153
91154 debug_mutex_lock_common(lock, &waiter);
91155- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
91156+ debug_mutex_add_waiter(lock, &waiter, task);
91157
91158 /* add waiting tasks to the end of the waitqueue (FIFO): */
91159 list_add_tail(&waiter.list, &lock->wait_list);
91160@@ -531,7 +531,7 @@ slowpath:
91161 schedule_preempt_disabled();
91162 spin_lock_mutex(&lock->wait_lock, flags);
91163 }
91164- mutex_remove_waiter(lock, &waiter, current_thread_info());
91165+ mutex_remove_waiter(lock, &waiter, task);
91166 /* set it to 0 if there are no waiters left: */
91167 if (likely(list_empty(&lock->wait_list)))
91168 atomic_set(&lock->count, 0);
91169@@ -568,7 +568,7 @@ skip_wait:
91170 return 0;
91171
91172 err:
91173- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
91174+ mutex_remove_waiter(lock, &waiter, task);
91175 spin_unlock_mutex(&lock->wait_lock, flags);
91176 debug_mutex_free_waiter(&waiter);
91177 mutex_release(&lock->dep_map, 1, ip);
91178diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
91179index 1d96dd0..994ff19 100644
91180--- a/kernel/locking/rtmutex-tester.c
91181+++ b/kernel/locking/rtmutex-tester.c
91182@@ -22,7 +22,7 @@
91183 #define MAX_RT_TEST_MUTEXES 8
91184
91185 static spinlock_t rttest_lock;
91186-static atomic_t rttest_event;
91187+static atomic_unchecked_t rttest_event;
91188
91189 struct test_thread_data {
91190 int opcode;
91191@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91192
91193 case RTTEST_LOCKCONT:
91194 td->mutexes[td->opdata] = 1;
91195- td->event = atomic_add_return(1, &rttest_event);
91196+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91197 return 0;
91198
91199 case RTTEST_RESET:
91200@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91201 return 0;
91202
91203 case RTTEST_RESETEVENT:
91204- atomic_set(&rttest_event, 0);
91205+ atomic_set_unchecked(&rttest_event, 0);
91206 return 0;
91207
91208 default:
91209@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91210 return ret;
91211
91212 td->mutexes[id] = 1;
91213- td->event = atomic_add_return(1, &rttest_event);
91214+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91215 rt_mutex_lock(&mutexes[id]);
91216- td->event = atomic_add_return(1, &rttest_event);
91217+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91218 td->mutexes[id] = 4;
91219 return 0;
91220
91221@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91222 return ret;
91223
91224 td->mutexes[id] = 1;
91225- td->event = atomic_add_return(1, &rttest_event);
91226+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91227 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
91228- td->event = atomic_add_return(1, &rttest_event);
91229+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91230 td->mutexes[id] = ret ? 0 : 4;
91231 return ret ? -EINTR : 0;
91232
91233@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
91234 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
91235 return ret;
91236
91237- td->event = atomic_add_return(1, &rttest_event);
91238+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91239 rt_mutex_unlock(&mutexes[id]);
91240- td->event = atomic_add_return(1, &rttest_event);
91241+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91242 td->mutexes[id] = 0;
91243 return 0;
91244
91245@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91246 break;
91247
91248 td->mutexes[dat] = 2;
91249- td->event = atomic_add_return(1, &rttest_event);
91250+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91251 break;
91252
91253 default:
91254@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91255 return;
91256
91257 td->mutexes[dat] = 3;
91258- td->event = atomic_add_return(1, &rttest_event);
91259+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91260 break;
91261
91262 case RTTEST_LOCKNOWAIT:
91263@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
91264 return;
91265
91266 td->mutexes[dat] = 1;
91267- td->event = atomic_add_return(1, &rttest_event);
91268+ td->event = atomic_add_return_unchecked(1, &rttest_event);
91269 return;
91270
91271 default:
91272diff --git a/kernel/module.c b/kernel/module.c
91273index 03214bd2..6242887 100644
91274--- a/kernel/module.c
91275+++ b/kernel/module.c
91276@@ -60,6 +60,7 @@
91277 #include <linux/jump_label.h>
91278 #include <linux/pfn.h>
91279 #include <linux/bsearch.h>
91280+#include <linux/grsecurity.h>
91281 #include <uapi/linux/module.h>
91282 #include "module-internal.h"
91283
91284@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91285
91286 /* Bounds of module allocation, for speeding __module_address.
91287 * Protected by module_mutex. */
91288-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91289+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91290+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91291
91292 int register_module_notifier(struct notifier_block * nb)
91293 {
91294@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91295 return true;
91296
91297 list_for_each_entry_rcu(mod, &modules, list) {
91298- struct symsearch arr[] = {
91299+ struct symsearch modarr[] = {
91300 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91301 NOT_GPL_ONLY, false },
91302 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91303@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
91304 if (mod->state == MODULE_STATE_UNFORMED)
91305 continue;
91306
91307- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91308+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91309 return true;
91310 }
91311 return false;
91312@@ -488,7 +490,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
91313 if (!pcpusec->sh_size)
91314 return 0;
91315
91316- if (align > PAGE_SIZE) {
91317+ if (align-1 >= PAGE_SIZE) {
91318 pr_warn("%s: per-cpu alignment %li > %li\n",
91319 mod->name, align, PAGE_SIZE);
91320 align = PAGE_SIZE;
91321@@ -1060,7 +1062,7 @@ struct module_attribute module_uevent =
91322 static ssize_t show_coresize(struct module_attribute *mattr,
91323 struct module_kobject *mk, char *buffer)
91324 {
91325- return sprintf(buffer, "%u\n", mk->mod->core_size);
91326+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
91327 }
91328
91329 static struct module_attribute modinfo_coresize =
91330@@ -1069,7 +1071,7 @@ static struct module_attribute modinfo_coresize =
91331 static ssize_t show_initsize(struct module_attribute *mattr,
91332 struct module_kobject *mk, char *buffer)
91333 {
91334- return sprintf(buffer, "%u\n", mk->mod->init_size);
91335+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
91336 }
91337
91338 static struct module_attribute modinfo_initsize =
91339@@ -1161,12 +1163,29 @@ static int check_version(Elf_Shdr *sechdrs,
91340 goto bad_version;
91341 }
91342
91343+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91344+ /*
91345+ * avoid potentially printing jibberish on attempted load
91346+ * of a module randomized with a different seed
91347+ */
91348+ pr_warn("no symbol version for %s\n", symname);
91349+#else
91350 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
91351+#endif
91352 return 0;
91353
91354 bad_version:
91355+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91356+ /*
91357+ * avoid potentially printing jibberish on attempted load
91358+ * of a module randomized with a different seed
91359+ */
91360+ printk("attempted module disagrees about version of symbol %s\n",
91361+ symname);
91362+#else
91363 printk("%s: disagrees about version of symbol %s\n",
91364 mod->name, symname);
91365+#endif
91366 return 0;
91367 }
91368
91369@@ -1282,7 +1301,7 @@ resolve_symbol_wait(struct module *mod,
91370 */
91371 #ifdef CONFIG_SYSFS
91372
91373-#ifdef CONFIG_KALLSYMS
91374+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91375 static inline bool sect_empty(const Elf_Shdr *sect)
91376 {
91377 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
91378@@ -1422,7 +1441,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
91379 {
91380 unsigned int notes, loaded, i;
91381 struct module_notes_attrs *notes_attrs;
91382- struct bin_attribute *nattr;
91383+ bin_attribute_no_const *nattr;
91384
91385 /* failed to create section attributes, so can't create notes */
91386 if (!mod->sect_attrs)
91387@@ -1534,7 +1553,7 @@ static void del_usage_links(struct module *mod)
91388 static int module_add_modinfo_attrs(struct module *mod)
91389 {
91390 struct module_attribute *attr;
91391- struct module_attribute *temp_attr;
91392+ module_attribute_no_const *temp_attr;
91393 int error = 0;
91394 int i;
91395
91396@@ -1755,21 +1774,21 @@ static void set_section_ro_nx(void *base,
91397
91398 static void unset_module_core_ro_nx(struct module *mod)
91399 {
91400- set_page_attributes(mod->module_core + mod->core_text_size,
91401- mod->module_core + mod->core_size,
91402+ set_page_attributes(mod->module_core_rw,
91403+ mod->module_core_rw + mod->core_size_rw,
91404 set_memory_x);
91405- set_page_attributes(mod->module_core,
91406- mod->module_core + mod->core_ro_size,
91407+ set_page_attributes(mod->module_core_rx,
91408+ mod->module_core_rx + mod->core_size_rx,
91409 set_memory_rw);
91410 }
91411
91412 static void unset_module_init_ro_nx(struct module *mod)
91413 {
91414- set_page_attributes(mod->module_init + mod->init_text_size,
91415- mod->module_init + mod->init_size,
91416+ set_page_attributes(mod->module_init_rw,
91417+ mod->module_init_rw + mod->init_size_rw,
91418 set_memory_x);
91419- set_page_attributes(mod->module_init,
91420- mod->module_init + mod->init_ro_size,
91421+ set_page_attributes(mod->module_init_rx,
91422+ mod->module_init_rx + mod->init_size_rx,
91423 set_memory_rw);
91424 }
91425
91426@@ -1782,14 +1801,14 @@ void set_all_modules_text_rw(void)
91427 list_for_each_entry_rcu(mod, &modules, list) {
91428 if (mod->state == MODULE_STATE_UNFORMED)
91429 continue;
91430- if ((mod->module_core) && (mod->core_text_size)) {
91431- set_page_attributes(mod->module_core,
91432- mod->module_core + mod->core_text_size,
91433+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91434+ set_page_attributes(mod->module_core_rx,
91435+ mod->module_core_rx + mod->core_size_rx,
91436 set_memory_rw);
91437 }
91438- if ((mod->module_init) && (mod->init_text_size)) {
91439- set_page_attributes(mod->module_init,
91440- mod->module_init + mod->init_text_size,
91441+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91442+ set_page_attributes(mod->module_init_rx,
91443+ mod->module_init_rx + mod->init_size_rx,
91444 set_memory_rw);
91445 }
91446 }
91447@@ -1805,14 +1824,14 @@ void set_all_modules_text_ro(void)
91448 list_for_each_entry_rcu(mod, &modules, list) {
91449 if (mod->state == MODULE_STATE_UNFORMED)
91450 continue;
91451- if ((mod->module_core) && (mod->core_text_size)) {
91452- set_page_attributes(mod->module_core,
91453- mod->module_core + mod->core_text_size,
91454+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
91455+ set_page_attributes(mod->module_core_rx,
91456+ mod->module_core_rx + mod->core_size_rx,
91457 set_memory_ro);
91458 }
91459- if ((mod->module_init) && (mod->init_text_size)) {
91460- set_page_attributes(mod->module_init,
91461- mod->module_init + mod->init_text_size,
91462+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
91463+ set_page_attributes(mod->module_init_rx,
91464+ mod->module_init_rx + mod->init_size_rx,
91465 set_memory_ro);
91466 }
91467 }
91468@@ -1842,7 +1861,9 @@ static void free_module(struct module *mod)
91469
91470 /* We leave it in list to prevent duplicate loads, but make sure
91471 * that noone uses it while it's being deconstructed. */
91472+ mutex_lock(&module_mutex);
91473 mod->state = MODULE_STATE_UNFORMED;
91474+ mutex_unlock(&module_mutex);
91475
91476 /* Remove dynamic debug info */
91477 ddebug_remove_module(mod->name);
91478@@ -1863,16 +1884,19 @@ static void free_module(struct module *mod)
91479
91480 /* This may be NULL, but that's OK */
91481 unset_module_init_ro_nx(mod);
91482- module_free(mod, mod->module_init);
91483+ module_free(mod, mod->module_init_rw);
91484+ module_free_exec(mod, mod->module_init_rx);
91485 kfree(mod->args);
91486 percpu_modfree(mod);
91487
91488 /* Free lock-classes: */
91489- lockdep_free_key_range(mod->module_core, mod->core_size);
91490+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91491+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91492
91493 /* Finally, free the core (containing the module structure) */
91494 unset_module_core_ro_nx(mod);
91495- module_free(mod, mod->module_core);
91496+ module_free_exec(mod, mod->module_core_rx);
91497+ module_free(mod, mod->module_core_rw);
91498
91499 #ifdef CONFIG_MPU
91500 update_protections(current->mm);
91501@@ -1941,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91502 int ret = 0;
91503 const struct kernel_symbol *ksym;
91504
91505+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91506+ int is_fs_load = 0;
91507+ int register_filesystem_found = 0;
91508+ char *p;
91509+
91510+ p = strstr(mod->args, "grsec_modharden_fs");
91511+ if (p) {
91512+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
91513+ /* copy \0 as well */
91514+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91515+ is_fs_load = 1;
91516+ }
91517+#endif
91518+
91519 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
91520 const char *name = info->strtab + sym[i].st_name;
91521
91522+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91523+ /* it's a real shame this will never get ripped and copied
91524+ upstream! ;(
91525+ */
91526+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91527+ register_filesystem_found = 1;
91528+#endif
91529+
91530 switch (sym[i].st_shndx) {
91531 case SHN_COMMON:
91532 /* Ignore common symbols */
91533@@ -1968,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91534 ksym = resolve_symbol_wait(mod, info, name);
91535 /* Ok if resolved. */
91536 if (ksym && !IS_ERR(ksym)) {
91537+ pax_open_kernel();
91538 sym[i].st_value = ksym->value;
91539+ pax_close_kernel();
91540 break;
91541 }
91542
91543@@ -1987,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
91544 secbase = (unsigned long)mod_percpu(mod);
91545 else
91546 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
91547+ pax_open_kernel();
91548 sym[i].st_value += secbase;
91549+ pax_close_kernel();
91550 break;
91551 }
91552 }
91553
91554+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91555+ if (is_fs_load && !register_filesystem_found) {
91556+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
91557+ ret = -EPERM;
91558+ }
91559+#endif
91560+
91561 return ret;
91562 }
91563
91564@@ -2075,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
91565 || s->sh_entsize != ~0UL
91566 || strstarts(sname, ".init"))
91567 continue;
91568- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
91569+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91570+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
91571+ else
91572+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
91573 pr_debug("\t%s\n", sname);
91574 }
91575- switch (m) {
91576- case 0: /* executable */
91577- mod->core_size = debug_align(mod->core_size);
91578- mod->core_text_size = mod->core_size;
91579- break;
91580- case 1: /* RO: text and ro-data */
91581- mod->core_size = debug_align(mod->core_size);
91582- mod->core_ro_size = mod->core_size;
91583- break;
91584- case 3: /* whole core */
91585- mod->core_size = debug_align(mod->core_size);
91586- break;
91587- }
91588 }
91589
91590 pr_debug("Init section allocation order:\n");
91591@@ -2104,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
91592 || s->sh_entsize != ~0UL
91593 || !strstarts(sname, ".init"))
91594 continue;
91595- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
91596- | INIT_OFFSET_MASK);
91597+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91598+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
91599+ else
91600+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
91601+ s->sh_entsize |= INIT_OFFSET_MASK;
91602 pr_debug("\t%s\n", sname);
91603 }
91604- switch (m) {
91605- case 0: /* executable */
91606- mod->init_size = debug_align(mod->init_size);
91607- mod->init_text_size = mod->init_size;
91608- break;
91609- case 1: /* RO: text and ro-data */
91610- mod->init_size = debug_align(mod->init_size);
91611- mod->init_ro_size = mod->init_size;
91612- break;
91613- case 3: /* whole init */
91614- mod->init_size = debug_align(mod->init_size);
91615- break;
91616- }
91617 }
91618 }
91619
91620@@ -2293,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91621
91622 /* Put symbol section at end of init part of module. */
91623 symsect->sh_flags |= SHF_ALLOC;
91624- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
91625+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
91626 info->index.sym) | INIT_OFFSET_MASK;
91627 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
91628
91629@@ -2310,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
91630 }
91631
91632 /* Append room for core symbols at end of core part. */
91633- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
91634- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
91635- mod->core_size += strtab_size;
91636+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
91637+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
91638+ mod->core_size_rx += strtab_size;
91639
91640 /* Put string table section at end of init part of module. */
91641 strsect->sh_flags |= SHF_ALLOC;
91642- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
91643+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
91644 info->index.str) | INIT_OFFSET_MASK;
91645 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
91646 }
91647@@ -2334,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91648 /* Make sure we get permanent strtab: don't use info->strtab. */
91649 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
91650
91651+ pax_open_kernel();
91652+
91653 /* Set types up while we still have access to sections. */
91654 for (i = 0; i < mod->num_symtab; i++)
91655 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
91656
91657- mod->core_symtab = dst = mod->module_core + info->symoffs;
91658- mod->core_strtab = s = mod->module_core + info->stroffs;
91659+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
91660+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
91661 src = mod->symtab;
91662 for (ndst = i = 0; i < mod->num_symtab; i++) {
91663 if (i == 0 ||
91664@@ -2351,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
91665 }
91666 }
91667 mod->core_num_syms = ndst;
91668+
91669+ pax_close_kernel();
91670 }
91671 #else
91672 static inline void layout_symtab(struct module *mod, struct load_info *info)
91673@@ -2384,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
91674 return vmalloc_exec(size);
91675 }
91676
91677-static void *module_alloc_update_bounds(unsigned long size)
91678+static void *module_alloc_update_bounds_rw(unsigned long size)
91679 {
91680 void *ret = module_alloc(size);
91681
91682 if (ret) {
91683 mutex_lock(&module_mutex);
91684 /* Update module bounds. */
91685- if ((unsigned long)ret < module_addr_min)
91686- module_addr_min = (unsigned long)ret;
91687- if ((unsigned long)ret + size > module_addr_max)
91688- module_addr_max = (unsigned long)ret + size;
91689+ if ((unsigned long)ret < module_addr_min_rw)
91690+ module_addr_min_rw = (unsigned long)ret;
91691+ if ((unsigned long)ret + size > module_addr_max_rw)
91692+ module_addr_max_rw = (unsigned long)ret + size;
91693+ mutex_unlock(&module_mutex);
91694+ }
91695+ return ret;
91696+}
91697+
91698+static void *module_alloc_update_bounds_rx(unsigned long size)
91699+{
91700+ void *ret = module_alloc_exec(size);
91701+
91702+ if (ret) {
91703+ mutex_lock(&module_mutex);
91704+ /* Update module bounds. */
91705+ if ((unsigned long)ret < module_addr_min_rx)
91706+ module_addr_min_rx = (unsigned long)ret;
91707+ if ((unsigned long)ret + size > module_addr_max_rx)
91708+ module_addr_max_rx = (unsigned long)ret + size;
91709 mutex_unlock(&module_mutex);
91710 }
91711 return ret;
91712@@ -2648,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91713 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
91714
91715 if (info->index.sym == 0) {
91716+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
91717+ /*
91718+ * avoid potentially printing jibberish on attempted load
91719+ * of a module randomized with a different seed
91720+ */
91721+ pr_warn("module has no symbols (stripped?)\n");
91722+#else
91723 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
91724+#endif
91725 return ERR_PTR(-ENOEXEC);
91726 }
91727
91728@@ -2664,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
91729 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91730 {
91731 const char *modmagic = get_modinfo(info, "vermagic");
91732+ const char *license = get_modinfo(info, "license");
91733 int err;
91734
91735+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
91736+ if (!license || !license_is_gpl_compatible(license))
91737+ return -ENOEXEC;
91738+#endif
91739+
91740 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
91741 modmagic = NULL;
91742
91743@@ -2690,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91744 }
91745
91746 /* Set up license info based on the info section */
91747- set_license(mod, get_modinfo(info, "license"));
91748+ set_license(mod, license);
91749
91750 return 0;
91751 }
91752@@ -2784,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
91753 void *ptr;
91754
91755 /* Do the allocs. */
91756- ptr = module_alloc_update_bounds(mod->core_size);
91757+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91758 /*
91759 * The pointer to this block is stored in the module structure
91760 * which is inside the block. Just mark it as not being a
91761@@ -2794,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
91762 if (!ptr)
91763 return -ENOMEM;
91764
91765- memset(ptr, 0, mod->core_size);
91766- mod->module_core = ptr;
91767+ memset(ptr, 0, mod->core_size_rw);
91768+ mod->module_core_rw = ptr;
91769
91770- if (mod->init_size) {
91771- ptr = module_alloc_update_bounds(mod->init_size);
91772+ if (mod->init_size_rw) {
91773+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91774 /*
91775 * The pointer to this block is stored in the module structure
91776 * which is inside the block. This block doesn't need to be
91777@@ -2807,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
91778 */
91779 kmemleak_ignore(ptr);
91780 if (!ptr) {
91781- module_free(mod, mod->module_core);
91782+ module_free(mod, mod->module_core_rw);
91783 return -ENOMEM;
91784 }
91785- memset(ptr, 0, mod->init_size);
91786- mod->module_init = ptr;
91787+ memset(ptr, 0, mod->init_size_rw);
91788+ mod->module_init_rw = ptr;
91789 } else
91790- mod->module_init = NULL;
91791+ mod->module_init_rw = NULL;
91792+
91793+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91794+ kmemleak_not_leak(ptr);
91795+ if (!ptr) {
91796+ if (mod->module_init_rw)
91797+ module_free(mod, mod->module_init_rw);
91798+ module_free(mod, mod->module_core_rw);
91799+ return -ENOMEM;
91800+ }
91801+
91802+ pax_open_kernel();
91803+ memset(ptr, 0, mod->core_size_rx);
91804+ pax_close_kernel();
91805+ mod->module_core_rx = ptr;
91806+
91807+ if (mod->init_size_rx) {
91808+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91809+ kmemleak_ignore(ptr);
91810+ if (!ptr && mod->init_size_rx) {
91811+ module_free_exec(mod, mod->module_core_rx);
91812+ if (mod->module_init_rw)
91813+ module_free(mod, mod->module_init_rw);
91814+ module_free(mod, mod->module_core_rw);
91815+ return -ENOMEM;
91816+ }
91817+
91818+ pax_open_kernel();
91819+ memset(ptr, 0, mod->init_size_rx);
91820+ pax_close_kernel();
91821+ mod->module_init_rx = ptr;
91822+ } else
91823+ mod->module_init_rx = NULL;
91824
91825 /* Transfer each section which specifies SHF_ALLOC */
91826 pr_debug("final section addresses:\n");
91827@@ -2824,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
91828 if (!(shdr->sh_flags & SHF_ALLOC))
91829 continue;
91830
91831- if (shdr->sh_entsize & INIT_OFFSET_MASK)
91832- dest = mod->module_init
91833- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91834- else
91835- dest = mod->module_core + shdr->sh_entsize;
91836+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
91837+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91838+ dest = mod->module_init_rw
91839+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91840+ else
91841+ dest = mod->module_init_rx
91842+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91843+ } else {
91844+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91845+ dest = mod->module_core_rw + shdr->sh_entsize;
91846+ else
91847+ dest = mod->module_core_rx + shdr->sh_entsize;
91848+ }
91849+
91850+ if (shdr->sh_type != SHT_NOBITS) {
91851+
91852+#ifdef CONFIG_PAX_KERNEXEC
91853+#ifdef CONFIG_X86_64
91854+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91855+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91856+#endif
91857+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91858+ pax_open_kernel();
91859+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91860+ pax_close_kernel();
91861+ } else
91862+#endif
91863
91864- if (shdr->sh_type != SHT_NOBITS)
91865 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91866+ }
91867 /* Update sh_addr to point to copy in image. */
91868- shdr->sh_addr = (unsigned long)dest;
91869+
91870+#ifdef CONFIG_PAX_KERNEXEC
91871+ if (shdr->sh_flags & SHF_EXECINSTR)
91872+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91873+ else
91874+#endif
91875+
91876+ shdr->sh_addr = (unsigned long)dest;
91877 pr_debug("\t0x%lx %s\n",
91878 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91879 }
91880@@ -2890,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
91881 * Do it before processing of module parameters, so the module
91882 * can provide parameter accessor functions of its own.
91883 */
91884- if (mod->module_init)
91885- flush_icache_range((unsigned long)mod->module_init,
91886- (unsigned long)mod->module_init
91887- + mod->init_size);
91888- flush_icache_range((unsigned long)mod->module_core,
91889- (unsigned long)mod->module_core + mod->core_size);
91890+ if (mod->module_init_rx)
91891+ flush_icache_range((unsigned long)mod->module_init_rx,
91892+ (unsigned long)mod->module_init_rx
91893+ + mod->init_size_rx);
91894+ flush_icache_range((unsigned long)mod->module_core_rx,
91895+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91896
91897 set_fs(old_fs);
91898 }
91899@@ -2952,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
91900 static void module_deallocate(struct module *mod, struct load_info *info)
91901 {
91902 percpu_modfree(mod);
91903- module_free(mod, mod->module_init);
91904- module_free(mod, mod->module_core);
91905+ module_free_exec(mod, mod->module_init_rx);
91906+ module_free_exec(mod, mod->module_core_rx);
91907+ module_free(mod, mod->module_init_rw);
91908+ module_free(mod, mod->module_core_rw);
91909 }
91910
91911 int __weak module_finalize(const Elf_Ehdr *hdr,
91912@@ -2966,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91913 static int post_relocation(struct module *mod, const struct load_info *info)
91914 {
91915 /* Sort exception table now relocations are done. */
91916+ pax_open_kernel();
91917 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91918+ pax_close_kernel();
91919
91920 /* Copy relocated percpu area over. */
91921 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91922@@ -3075,11 +3211,12 @@ static int do_init_module(struct module *mod)
91923 mod->strtab = mod->core_strtab;
91924 #endif
91925 unset_module_init_ro_nx(mod);
91926- module_free(mod, mod->module_init);
91927- mod->module_init = NULL;
91928- mod->init_size = 0;
91929- mod->init_ro_size = 0;
91930- mod->init_text_size = 0;
91931+ module_free(mod, mod->module_init_rw);
91932+ module_free_exec(mod, mod->module_init_rx);
91933+ mod->module_init_rw = NULL;
91934+ mod->module_init_rx = NULL;
91935+ mod->init_size_rw = 0;
91936+ mod->init_size_rx = 0;
91937 mutex_unlock(&module_mutex);
91938 wake_up_all(&module_wq);
91939
91940@@ -3147,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91941 module_bug_finalize(info->hdr, info->sechdrs, mod);
91942
91943 /* Set RO and NX regions for core */
91944- set_section_ro_nx(mod->module_core,
91945- mod->core_text_size,
91946- mod->core_ro_size,
91947- mod->core_size);
91948+ set_section_ro_nx(mod->module_core_rx,
91949+ mod->core_size_rx,
91950+ mod->core_size_rx,
91951+ mod->core_size_rx);
91952
91953 /* Set RO and NX regions for init */
91954- set_section_ro_nx(mod->module_init,
91955- mod->init_text_size,
91956- mod->init_ro_size,
91957- mod->init_size);
91958+ set_section_ro_nx(mod->module_init_rx,
91959+ mod->init_size_rx,
91960+ mod->init_size_rx,
91961+ mod->init_size_rx);
91962
91963 /* Mark state as coming so strong_try_module_get() ignores us,
91964 * but kallsyms etc. can see us. */
91965@@ -3240,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91966 if (err)
91967 goto free_unload;
91968
91969+ /* Now copy in args */
91970+ mod->args = strndup_user(uargs, ~0UL >> 1);
91971+ if (IS_ERR(mod->args)) {
91972+ err = PTR_ERR(mod->args);
91973+ goto free_unload;
91974+ }
91975+
91976 /* Set up MODINFO_ATTR fields */
91977 setup_modinfo(mod, info);
91978
91979+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91980+ {
91981+ char *p, *p2;
91982+
91983+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91984+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91985+ err = -EPERM;
91986+ goto free_modinfo;
91987+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91988+ p += sizeof("grsec_modharden_normal") - 1;
91989+ p2 = strstr(p, "_");
91990+ if (p2) {
91991+ *p2 = '\0';
91992+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91993+ *p2 = '_';
91994+ }
91995+ err = -EPERM;
91996+ goto free_modinfo;
91997+ }
91998+ }
91999+#endif
92000+
92001 /* Fix up syms, so that st_value is a pointer to location. */
92002 err = simplify_symbols(mod, info);
92003 if (err < 0)
92004@@ -3258,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
92005
92006 flush_module_icache(mod);
92007
92008- /* Now copy in args */
92009- mod->args = strndup_user(uargs, ~0UL >> 1);
92010- if (IS_ERR(mod->args)) {
92011- err = PTR_ERR(mod->args);
92012- goto free_arch_cleanup;
92013- }
92014-
92015 dynamic_debug_setup(info->debug, info->num_debug);
92016
92017 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
92018@@ -3312,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
92019 ddebug_cleanup:
92020 dynamic_debug_remove(info->debug);
92021 synchronize_sched();
92022- kfree(mod->args);
92023- free_arch_cleanup:
92024 module_arch_cleanup(mod);
92025 free_modinfo:
92026 free_modinfo(mod);
92027+ kfree(mod->args);
92028 free_unload:
92029 module_unload_free(mod);
92030 unlink_mod:
92031@@ -3401,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
92032 unsigned long nextval;
92033
92034 /* At worse, next value is at end of module */
92035- if (within_module_init(addr, mod))
92036- nextval = (unsigned long)mod->module_init+mod->init_text_size;
92037+ if (within_module_init_rx(addr, mod))
92038+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
92039+ else if (within_module_init_rw(addr, mod))
92040+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
92041+ else if (within_module_core_rx(addr, mod))
92042+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
92043+ else if (within_module_core_rw(addr, mod))
92044+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
92045 else
92046- nextval = (unsigned long)mod->module_core+mod->core_text_size;
92047+ return NULL;
92048
92049 /* Scan for closest preceding symbol, and next symbol. (ELF
92050 starts real symbols at 1). */
92051@@ -3652,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
92052 return 0;
92053
92054 seq_printf(m, "%s %u",
92055- mod->name, mod->init_size + mod->core_size);
92056+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
92057 print_unload_info(m, mod);
92058
92059 /* Informative for users. */
92060@@ -3661,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
92061 mod->state == MODULE_STATE_COMING ? "Loading":
92062 "Live");
92063 /* Used by oprofile and other similar tools. */
92064- seq_printf(m, " 0x%pK", mod->module_core);
92065+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
92066
92067 /* Taints info */
92068 if (mod->taints)
92069@@ -3697,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
92070
92071 static int __init proc_modules_init(void)
92072 {
92073+#ifndef CONFIG_GRKERNSEC_HIDESYM
92074+#ifdef CONFIG_GRKERNSEC_PROC_USER
92075+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92076+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92077+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
92078+#else
92079 proc_create("modules", 0, NULL, &proc_modules_operations);
92080+#endif
92081+#else
92082+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
92083+#endif
92084 return 0;
92085 }
92086 module_init(proc_modules_init);
92087@@ -3758,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
92088 {
92089 struct module *mod;
92090
92091- if (addr < module_addr_min || addr > module_addr_max)
92092+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
92093+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
92094 return NULL;
92095
92096 list_for_each_entry_rcu(mod, &modules, list) {
92097@@ -3799,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
92098 */
92099 struct module *__module_text_address(unsigned long addr)
92100 {
92101- struct module *mod = __module_address(addr);
92102+ struct module *mod;
92103+
92104+#ifdef CONFIG_X86_32
92105+ addr = ktla_ktva(addr);
92106+#endif
92107+
92108+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
92109+ return NULL;
92110+
92111+ mod = __module_address(addr);
92112+
92113 if (mod) {
92114 /* Make sure it's within the text section. */
92115- if (!within(addr, mod->module_init, mod->init_text_size)
92116- && !within(addr, mod->module_core, mod->core_text_size))
92117+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
92118 mod = NULL;
92119 }
92120 return mod;
92121diff --git a/kernel/notifier.c b/kernel/notifier.c
92122index 4803da6..1c5eea6 100644
92123--- a/kernel/notifier.c
92124+++ b/kernel/notifier.c
92125@@ -5,6 +5,7 @@
92126 #include <linux/rcupdate.h>
92127 #include <linux/vmalloc.h>
92128 #include <linux/reboot.h>
92129+#include <linux/mm.h>
92130
92131 /*
92132 * Notifier list for kernel code which wants to be called
92133@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
92134 while ((*nl) != NULL) {
92135 if (n->priority > (*nl)->priority)
92136 break;
92137- nl = &((*nl)->next);
92138+ nl = (struct notifier_block **)&((*nl)->next);
92139 }
92140- n->next = *nl;
92141+ pax_open_kernel();
92142+ *(const void **)&n->next = *nl;
92143 rcu_assign_pointer(*nl, n);
92144+ pax_close_kernel();
92145 return 0;
92146 }
92147
92148@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
92149 return 0;
92150 if (n->priority > (*nl)->priority)
92151 break;
92152- nl = &((*nl)->next);
92153+ nl = (struct notifier_block **)&((*nl)->next);
92154 }
92155- n->next = *nl;
92156+ pax_open_kernel();
92157+ *(const void **)&n->next = *nl;
92158 rcu_assign_pointer(*nl, n);
92159+ pax_close_kernel();
92160 return 0;
92161 }
92162
92163@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
92164 {
92165 while ((*nl) != NULL) {
92166 if ((*nl) == n) {
92167+ pax_open_kernel();
92168 rcu_assign_pointer(*nl, n->next);
92169+ pax_close_kernel();
92170 return 0;
92171 }
92172- nl = &((*nl)->next);
92173+ nl = (struct notifier_block **)&((*nl)->next);
92174 }
92175 return -ENOENT;
92176 }
92177diff --git a/kernel/padata.c b/kernel/padata.c
92178index 161402f..598814c 100644
92179--- a/kernel/padata.c
92180+++ b/kernel/padata.c
92181@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
92182 * seq_nr mod. number of cpus in use.
92183 */
92184
92185- seq_nr = atomic_inc_return(&pd->seq_nr);
92186+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
92187 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
92188
92189 return padata_index_to_cpu(pd, cpu_index);
92190@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
92191 padata_init_pqueues(pd);
92192 padata_init_squeues(pd);
92193 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
92194- atomic_set(&pd->seq_nr, -1);
92195+ atomic_set_unchecked(&pd->seq_nr, -1);
92196 atomic_set(&pd->reorder_objects, 0);
92197 atomic_set(&pd->refcnt, 0);
92198 pd->pinst = pinst;
92199diff --git a/kernel/panic.c b/kernel/panic.c
92200index d09dc5c..9abbdff 100644
92201--- a/kernel/panic.c
92202+++ b/kernel/panic.c
92203@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
92204 /*
92205 * Stop ourself in panic -- architecture code may override this
92206 */
92207-void __weak panic_smp_self_stop(void)
92208+void __weak __noreturn panic_smp_self_stop(void)
92209 {
92210 while (1)
92211 cpu_relax();
92212@@ -421,7 +421,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
92213 disable_trace_on_warning();
92214
92215 pr_warn("------------[ cut here ]------------\n");
92216- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
92217+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
92218 raw_smp_processor_id(), current->pid, file, line, caller);
92219
92220 if (args)
92221@@ -475,7 +475,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92222 */
92223 __visible void __stack_chk_fail(void)
92224 {
92225- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92226+ dump_stack();
92227+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92228 __builtin_return_address(0));
92229 }
92230 EXPORT_SYMBOL(__stack_chk_fail);
92231diff --git a/kernel/pid.c b/kernel/pid.c
92232index 9b9a266..c20ef80 100644
92233--- a/kernel/pid.c
92234+++ b/kernel/pid.c
92235@@ -33,6 +33,7 @@
92236 #include <linux/rculist.h>
92237 #include <linux/bootmem.h>
92238 #include <linux/hash.h>
92239+#include <linux/security.h>
92240 #include <linux/pid_namespace.h>
92241 #include <linux/init_task.h>
92242 #include <linux/syscalls.h>
92243@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92244
92245 int pid_max = PID_MAX_DEFAULT;
92246
92247-#define RESERVED_PIDS 300
92248+#define RESERVED_PIDS 500
92249
92250 int pid_max_min = RESERVED_PIDS + 1;
92251 int pid_max_max = PID_MAX_LIMIT;
92252@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
92253 */
92254 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92255 {
92256+ struct task_struct *task;
92257+
92258 rcu_lockdep_assert(rcu_read_lock_held(),
92259 "find_task_by_pid_ns() needs rcu_read_lock()"
92260 " protection");
92261- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92262+
92263+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92264+
92265+ if (gr_pid_is_chrooted(task))
92266+ return NULL;
92267+
92268+ return task;
92269 }
92270
92271 struct task_struct *find_task_by_vpid(pid_t vnr)
92272@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92273 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
92274 }
92275
92276+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92277+{
92278+ rcu_lockdep_assert(rcu_read_lock_held(),
92279+ "find_task_by_pid_ns() needs rcu_read_lock()"
92280+ " protection");
92281+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
92282+}
92283+
92284 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
92285 {
92286 struct pid *pid;
92287diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
92288index db95d8e..a0ca23f 100644
92289--- a/kernel/pid_namespace.c
92290+++ b/kernel/pid_namespace.c
92291@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
92292 void __user *buffer, size_t *lenp, loff_t *ppos)
92293 {
92294 struct pid_namespace *pid_ns = task_active_pid_ns(current);
92295- struct ctl_table tmp = *table;
92296+ ctl_table_no_const tmp = *table;
92297
92298 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
92299 return -EPERM;
92300diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
92301index e4e4121..71faf14 100644
92302--- a/kernel/power/Kconfig
92303+++ b/kernel/power/Kconfig
92304@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
92305 config HIBERNATION
92306 bool "Hibernation (aka 'suspend to disk')"
92307 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
92308+ depends on !GRKERNSEC_KMEM
92309+ depends on !PAX_MEMORY_SANITIZE
92310 select HIBERNATE_CALLBACKS
92311 select LZO_COMPRESS
92312 select LZO_DECOMPRESS
92313diff --git a/kernel/power/process.c b/kernel/power/process.c
92314index 4ee194e..925778f 100644
92315--- a/kernel/power/process.c
92316+++ b/kernel/power/process.c
92317@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
92318 unsigned int elapsed_msecs;
92319 bool wakeup = false;
92320 int sleep_usecs = USEC_PER_MSEC;
92321+ bool timedout = false;
92322
92323 do_gettimeofday(&start);
92324
92325@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
92326
92327 while (true) {
92328 todo = 0;
92329+ if (time_after(jiffies, end_time))
92330+ timedout = true;
92331 read_lock(&tasklist_lock);
92332 do_each_thread(g, p) {
92333 if (p == current || !freeze_task(p))
92334 continue;
92335
92336- if (!freezer_should_skip(p))
92337+ if (!freezer_should_skip(p)) {
92338 todo++;
92339+ if (timedout) {
92340+ printk(KERN_ERR "Task refusing to freeze:\n");
92341+ sched_show_task(p);
92342+ }
92343+ }
92344 } while_each_thread(g, p);
92345 read_unlock(&tasklist_lock);
92346
92347@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
92348 todo += wq_busy;
92349 }
92350
92351- if (!todo || time_after(jiffies, end_time))
92352+ if (!todo || timedout)
92353 break;
92354
92355 if (pm_wakeup_pending()) {
92356diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
92357index 1ce7706..3b07c49 100644
92358--- a/kernel/printk/printk.c
92359+++ b/kernel/printk/printk.c
92360@@ -490,6 +490,11 @@ static int check_syslog_permissions(int type, bool from_file)
92361 if (from_file && type != SYSLOG_ACTION_OPEN)
92362 return 0;
92363
92364+#ifdef CONFIG_GRKERNSEC_DMESG
92365+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
92366+ return -EPERM;
92367+#endif
92368+
92369 if (syslog_action_restricted(type)) {
92370 if (capable(CAP_SYSLOG))
92371 return 0;
92372diff --git a/kernel/profile.c b/kernel/profile.c
92373index 54bf5ba..df6e0a2 100644
92374--- a/kernel/profile.c
92375+++ b/kernel/profile.c
92376@@ -37,7 +37,7 @@ struct profile_hit {
92377 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
92378 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
92379
92380-static atomic_t *prof_buffer;
92381+static atomic_unchecked_t *prof_buffer;
92382 static unsigned long prof_len, prof_shift;
92383
92384 int prof_on __read_mostly;
92385@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
92386 hits[i].pc = 0;
92387 continue;
92388 }
92389- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92390+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92391 hits[i].hits = hits[i].pc = 0;
92392 }
92393 }
92394@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92395 * Add the current hit(s) and flush the write-queue out
92396 * to the global buffer:
92397 */
92398- atomic_add(nr_hits, &prof_buffer[pc]);
92399+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
92400 for (i = 0; i < NR_PROFILE_HIT; ++i) {
92401- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92402+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92403 hits[i].pc = hits[i].hits = 0;
92404 }
92405 out:
92406@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
92407 {
92408 unsigned long pc;
92409 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
92410- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92411+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92412 }
92413 #endif /* !CONFIG_SMP */
92414
92415@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
92416 return -EFAULT;
92417 buf++; p++; count--; read++;
92418 }
92419- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
92420+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
92421 if (copy_to_user(buf, (void *)pnt, count))
92422 return -EFAULT;
92423 read += count;
92424@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
92425 }
92426 #endif
92427 profile_discard_flip_buffers();
92428- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
92429+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
92430 return count;
92431 }
92432
92433diff --git a/kernel/ptrace.c b/kernel/ptrace.c
92434index 54e7522..5b82dd6 100644
92435--- a/kernel/ptrace.c
92436+++ b/kernel/ptrace.c
92437@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
92438 if (seize)
92439 flags |= PT_SEIZED;
92440 rcu_read_lock();
92441- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92442+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
92443 flags |= PT_PTRACE_CAP;
92444 rcu_read_unlock();
92445 task->ptrace = flags;
92446@@ -532,7 +532,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
92447 break;
92448 return -EIO;
92449 }
92450- if (copy_to_user(dst, buf, retval))
92451+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
92452 return -EFAULT;
92453 copied += retval;
92454 src += retval;
92455@@ -800,7 +800,7 @@ int ptrace_request(struct task_struct *child, long request,
92456 bool seized = child->ptrace & PT_SEIZED;
92457 int ret = -EIO;
92458 siginfo_t siginfo, *si;
92459- void __user *datavp = (void __user *) data;
92460+ void __user *datavp = (__force void __user *) data;
92461 unsigned long __user *datalp = datavp;
92462 unsigned long flags;
92463
92464@@ -1046,14 +1046,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
92465 goto out;
92466 }
92467
92468+ if (gr_handle_ptrace(child, request)) {
92469+ ret = -EPERM;
92470+ goto out_put_task_struct;
92471+ }
92472+
92473 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92474 ret = ptrace_attach(child, request, addr, data);
92475 /*
92476 * Some architectures need to do book-keeping after
92477 * a ptrace attach.
92478 */
92479- if (!ret)
92480+ if (!ret) {
92481 arch_ptrace_attach(child);
92482+ gr_audit_ptrace(child);
92483+ }
92484 goto out_put_task_struct;
92485 }
92486
92487@@ -1081,7 +1088,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
92488 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
92489 if (copied != sizeof(tmp))
92490 return -EIO;
92491- return put_user(tmp, (unsigned long __user *)data);
92492+ return put_user(tmp, (__force unsigned long __user *)data);
92493 }
92494
92495 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
92496@@ -1175,7 +1182,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
92497 }
92498
92499 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92500- compat_long_t, addr, compat_long_t, data)
92501+ compat_ulong_t, addr, compat_ulong_t, data)
92502 {
92503 struct task_struct *child;
92504 long ret;
92505@@ -1191,14 +1198,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
92506 goto out;
92507 }
92508
92509+ if (gr_handle_ptrace(child, request)) {
92510+ ret = -EPERM;
92511+ goto out_put_task_struct;
92512+ }
92513+
92514 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
92515 ret = ptrace_attach(child, request, addr, data);
92516 /*
92517 * Some architectures need to do book-keeping after
92518 * a ptrace attach.
92519 */
92520- if (!ret)
92521+ if (!ret) {
92522 arch_ptrace_attach(child);
92523+ gr_audit_ptrace(child);
92524+ }
92525 goto out_put_task_struct;
92526 }
92527
92528diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
92529index 948a769..5ca842b 100644
92530--- a/kernel/rcu/rcutorture.c
92531+++ b/kernel/rcu/rcutorture.c
92532@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92533 rcu_torture_count) = { 0 };
92534 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
92535 rcu_torture_batch) = { 0 };
92536-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92537-static atomic_t n_rcu_torture_alloc;
92538-static atomic_t n_rcu_torture_alloc_fail;
92539-static atomic_t n_rcu_torture_free;
92540-static atomic_t n_rcu_torture_mberror;
92541-static atomic_t n_rcu_torture_error;
92542+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
92543+static atomic_unchecked_t n_rcu_torture_alloc;
92544+static atomic_unchecked_t n_rcu_torture_alloc_fail;
92545+static atomic_unchecked_t n_rcu_torture_free;
92546+static atomic_unchecked_t n_rcu_torture_mberror;
92547+static atomic_unchecked_t n_rcu_torture_error;
92548 static long n_rcu_torture_barrier_error;
92549 static long n_rcu_torture_boost_ktrerror;
92550 static long n_rcu_torture_boost_rterror;
92551@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
92552
92553 spin_lock_bh(&rcu_torture_lock);
92554 if (list_empty(&rcu_torture_freelist)) {
92555- atomic_inc(&n_rcu_torture_alloc_fail);
92556+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
92557 spin_unlock_bh(&rcu_torture_lock);
92558 return NULL;
92559 }
92560- atomic_inc(&n_rcu_torture_alloc);
92561+ atomic_inc_unchecked(&n_rcu_torture_alloc);
92562 p = rcu_torture_freelist.next;
92563 list_del_init(p);
92564 spin_unlock_bh(&rcu_torture_lock);
92565@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
92566 static void
92567 rcu_torture_free(struct rcu_torture *p)
92568 {
92569- atomic_inc(&n_rcu_torture_free);
92570+ atomic_inc_unchecked(&n_rcu_torture_free);
92571 spin_lock_bh(&rcu_torture_lock);
92572 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
92573 spin_unlock_bh(&rcu_torture_lock);
92574@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
92575 i = rp->rtort_pipe_count;
92576 if (i > RCU_TORTURE_PIPE_LEN)
92577 i = RCU_TORTURE_PIPE_LEN;
92578- atomic_inc(&rcu_torture_wcount[i]);
92579+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92580 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
92581 rp->rtort_mbtest = 0;
92582 return true;
92583@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
92584 i = old_rp->rtort_pipe_count;
92585 if (i > RCU_TORTURE_PIPE_LEN)
92586 i = RCU_TORTURE_PIPE_LEN;
92587- atomic_inc(&rcu_torture_wcount[i]);
92588+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
92589 old_rp->rtort_pipe_count++;
92590 switch (synctype[torture_random(&rand) % nsynctypes]) {
92591 case RTWS_DEF_FREE:
92592@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
92593 return;
92594 }
92595 if (p->rtort_mbtest == 0)
92596- atomic_inc(&n_rcu_torture_mberror);
92597+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92598 spin_lock(&rand_lock);
92599 cur_ops->read_delay(&rand);
92600 n_rcu_torture_timers++;
92601@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
92602 continue;
92603 }
92604 if (p->rtort_mbtest == 0)
92605- atomic_inc(&n_rcu_torture_mberror);
92606+ atomic_inc_unchecked(&n_rcu_torture_mberror);
92607 cur_ops->read_delay(&rand);
92608 preempt_disable();
92609 pipe_count = p->rtort_pipe_count;
92610@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
92611 }
92612 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
92613 page += sprintf(page,
92614- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
92615+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
92616 rcu_torture_current,
92617 rcu_torture_current_version,
92618 list_empty(&rcu_torture_freelist),
92619- atomic_read(&n_rcu_torture_alloc),
92620- atomic_read(&n_rcu_torture_alloc_fail),
92621- atomic_read(&n_rcu_torture_free));
92622+ atomic_read_unchecked(&n_rcu_torture_alloc),
92623+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
92624+ atomic_read_unchecked(&n_rcu_torture_free));
92625 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
92626- atomic_read(&n_rcu_torture_mberror),
92627+ atomic_read_unchecked(&n_rcu_torture_mberror),
92628 n_rcu_torture_boost_ktrerror,
92629 n_rcu_torture_boost_rterror);
92630 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
92631@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
92632 n_barrier_attempts,
92633 n_rcu_torture_barrier_error);
92634 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
92635- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
92636+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
92637 n_rcu_torture_barrier_error != 0 ||
92638 n_rcu_torture_boost_ktrerror != 0 ||
92639 n_rcu_torture_boost_rterror != 0 ||
92640 n_rcu_torture_boost_failure != 0 ||
92641 i > 1) {
92642 page += sprintf(page, "!!! ");
92643- atomic_inc(&n_rcu_torture_error);
92644+ atomic_inc_unchecked(&n_rcu_torture_error);
92645 WARN_ON_ONCE(1);
92646 }
92647 page += sprintf(page, "Reader Pipe: ");
92648@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
92649 page += sprintf(page, "Free-Block Circulation: ");
92650 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92651 page += sprintf(page, " %d",
92652- atomic_read(&rcu_torture_wcount[i]));
92653+ atomic_read_unchecked(&rcu_torture_wcount[i]));
92654 }
92655 page += sprintf(page, "\n");
92656 if (cur_ops->stats)
92657@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
92658
92659 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
92660
92661- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92662+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
92663 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
92664 else if (torture_onoff_failures())
92665 rcu_torture_print_module_parms(cur_ops,
92666@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
92667
92668 rcu_torture_current = NULL;
92669 rcu_torture_current_version = 0;
92670- atomic_set(&n_rcu_torture_alloc, 0);
92671- atomic_set(&n_rcu_torture_alloc_fail, 0);
92672- atomic_set(&n_rcu_torture_free, 0);
92673- atomic_set(&n_rcu_torture_mberror, 0);
92674- atomic_set(&n_rcu_torture_error, 0);
92675+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
92676+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
92677+ atomic_set_unchecked(&n_rcu_torture_free, 0);
92678+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
92679+ atomic_set_unchecked(&n_rcu_torture_error, 0);
92680 n_rcu_torture_barrier_error = 0;
92681 n_rcu_torture_boost_ktrerror = 0;
92682 n_rcu_torture_boost_rterror = 0;
92683 n_rcu_torture_boost_failure = 0;
92684 n_rcu_torture_boosts = 0;
92685 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
92686- atomic_set(&rcu_torture_wcount[i], 0);
92687+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
92688 for_each_possible_cpu(cpu) {
92689 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
92690 per_cpu(rcu_torture_count, cpu)[i] = 0;
92691diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
92692index d9efcc1..ea543e9 100644
92693--- a/kernel/rcu/tiny.c
92694+++ b/kernel/rcu/tiny.c
92695@@ -42,7 +42,7 @@
92696 /* Forward declarations for tiny_plugin.h. */
92697 struct rcu_ctrlblk;
92698 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
92699-static void rcu_process_callbacks(struct softirq_action *unused);
92700+static void rcu_process_callbacks(void);
92701 static void __call_rcu(struct rcu_head *head,
92702 void (*func)(struct rcu_head *rcu),
92703 struct rcu_ctrlblk *rcp);
92704@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
92705 false));
92706 }
92707
92708-static void rcu_process_callbacks(struct softirq_action *unused)
92709+static __latent_entropy void rcu_process_callbacks(void)
92710 {
92711 __rcu_process_callbacks(&rcu_sched_ctrlblk);
92712 __rcu_process_callbacks(&rcu_bh_ctrlblk);
92713diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
92714index 858c565..7efd915 100644
92715--- a/kernel/rcu/tiny_plugin.h
92716+++ b/kernel/rcu/tiny_plugin.h
92717@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
92718 dump_stack();
92719 }
92720 if (*rcp->curtail && ULONG_CMP_GE(j, js))
92721- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
92722+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
92723 3 * rcu_jiffies_till_stall_check() + 3;
92724 else if (ULONG_CMP_GE(j, js))
92725- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92726+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92727 }
92728
92729 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
92730 {
92731 rcp->ticks_this_gp = 0;
92732 rcp->gp_start = jiffies;
92733- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92734+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92735 }
92736
92737 static void check_cpu_stalls(void)
92738diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
92739index 1b70cb6..ea62b0a 100644
92740--- a/kernel/rcu/tree.c
92741+++ b/kernel/rcu/tree.c
92742@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
92743 */
92744 rdtp = this_cpu_ptr(&rcu_dynticks);
92745 smp_mb__before_atomic(); /* Earlier stuff before QS. */
92746- atomic_add(2, &rdtp->dynticks); /* QS. */
92747+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
92748 smp_mb__after_atomic(); /* Later stuff after QS. */
92749 break;
92750 }
92751@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
92752 rcu_prepare_for_idle(smp_processor_id());
92753 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92754 smp_mb__before_atomic(); /* See above. */
92755- atomic_inc(&rdtp->dynticks);
92756+ atomic_inc_unchecked(&rdtp->dynticks);
92757 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
92758- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92759+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92760
92761 /*
92762 * It is illegal to enter an extended quiescent state while
92763@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
92764 int user)
92765 {
92766 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
92767- atomic_inc(&rdtp->dynticks);
92768+ atomic_inc_unchecked(&rdtp->dynticks);
92769 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92770 smp_mb__after_atomic(); /* See above. */
92771- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92772+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92773 rcu_cleanup_after_idle(smp_processor_id());
92774 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
92775 if (!user && !is_idle_task(current)) {
92776@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
92777 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
92778
92779 if (rdtp->dynticks_nmi_nesting == 0 &&
92780- (atomic_read(&rdtp->dynticks) & 0x1))
92781+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
92782 return;
92783 rdtp->dynticks_nmi_nesting++;
92784 smp_mb__before_atomic(); /* Force delay from prior write. */
92785- atomic_inc(&rdtp->dynticks);
92786+ atomic_inc_unchecked(&rdtp->dynticks);
92787 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92788 smp_mb__after_atomic(); /* See above. */
92789- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92790+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92791 }
92792
92793 /**
92794@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
92795 return;
92796 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92797 smp_mb__before_atomic(); /* See above. */
92798- atomic_inc(&rdtp->dynticks);
92799+ atomic_inc_unchecked(&rdtp->dynticks);
92800 smp_mb__after_atomic(); /* Force delay to next write. */
92801- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92802+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92803 }
92804
92805 /**
92806@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
92807 */
92808 bool notrace __rcu_is_watching(void)
92809 {
92810- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92811+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92812 }
92813
92814 /**
92815@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
92816 static int dyntick_save_progress_counter(struct rcu_data *rdp,
92817 bool *isidle, unsigned long *maxj)
92818 {
92819- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
92820+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92821 rcu_sysidle_check_cpu(rdp, isidle, maxj);
92822 if ((rdp->dynticks_snap & 0x1) == 0) {
92823 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
92824@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92825 int *rcrmp;
92826 unsigned int snap;
92827
92828- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
92829+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92830 snap = (unsigned int)rdp->dynticks_snap;
92831
92832 /*
92833@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92834 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
92835 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
92836 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
92837- ACCESS_ONCE(rdp->cond_resched_completed) =
92838+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92839 ACCESS_ONCE(rdp->mynode->completed);
92840 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92841- ACCESS_ONCE(*rcrmp) =
92842+ ACCESS_ONCE_RW(*rcrmp) =
92843 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92844 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92845 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92846@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92847 rsp->gp_start = j;
92848 smp_wmb(); /* Record start time before stall time. */
92849 j1 = rcu_jiffies_till_stall_check();
92850- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92851+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92852 rsp->jiffies_resched = j + j1 / 2;
92853 }
92854
92855@@ -1049,7 +1049,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
92856 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92857 return;
92858 }
92859- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92860+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92861 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92862
92863 /*
92864@@ -1126,7 +1126,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92865
92866 raw_spin_lock_irqsave(&rnp->lock, flags);
92867 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92868- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92869+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92870 3 * rcu_jiffies_till_stall_check() + 3;
92871 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92872
92873@@ -1210,7 +1210,7 @@ void rcu_cpu_stall_reset(void)
92874 struct rcu_state *rsp;
92875
92876 for_each_rcu_flavor(rsp)
92877- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92878+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92879 }
92880
92881 /*
92882@@ -1596,7 +1596,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92883 raw_spin_unlock_irq(&rnp->lock);
92884 return 0;
92885 }
92886- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92887+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92888
92889 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92890 /*
92891@@ -1637,9 +1637,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92892 rdp = this_cpu_ptr(rsp->rda);
92893 rcu_preempt_check_blocked_tasks(rnp);
92894 rnp->qsmask = rnp->qsmaskinit;
92895- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92896+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92897 WARN_ON_ONCE(rnp->completed != rsp->completed);
92898- ACCESS_ONCE(rnp->completed) = rsp->completed;
92899+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92900 if (rnp == rdp->mynode)
92901 (void)__note_gp_changes(rsp, rnp, rdp);
92902 rcu_preempt_boost_start_gp(rnp);
92903@@ -1684,7 +1684,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92904 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92905 raw_spin_lock_irq(&rnp->lock);
92906 smp_mb__after_unlock_lock();
92907- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92908+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92909 raw_spin_unlock_irq(&rnp->lock);
92910 }
92911 return fqs_state;
92912@@ -1729,7 +1729,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92913 rcu_for_each_node_breadth_first(rsp, rnp) {
92914 raw_spin_lock_irq(&rnp->lock);
92915 smp_mb__after_unlock_lock();
92916- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92917+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92918 rdp = this_cpu_ptr(rsp->rda);
92919 if (rnp == rdp->mynode)
92920 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92921@@ -1744,14 +1744,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92922 rcu_nocb_gp_set(rnp, nocb);
92923
92924 /* Declare grace period done. */
92925- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92926+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92927 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92928 rsp->fqs_state = RCU_GP_IDLE;
92929 rdp = this_cpu_ptr(rsp->rda);
92930 /* Advance CBs to reduce false positives below. */
92931 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92932 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92933- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92934+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92935 trace_rcu_grace_period(rsp->name,
92936 ACCESS_ONCE(rsp->gpnum),
92937 TPS("newreq"));
92938@@ -1876,7 +1876,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92939 */
92940 return false;
92941 }
92942- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92943+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92944 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92945 TPS("newreq"));
92946
92947@@ -2097,7 +2097,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92948 rsp->qlen += rdp->qlen;
92949 rdp->n_cbs_orphaned += rdp->qlen;
92950 rdp->qlen_lazy = 0;
92951- ACCESS_ONCE(rdp->qlen) = 0;
92952+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92953 }
92954
92955 /*
92956@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92957 }
92958 smp_mb(); /* List handling before counting for rcu_barrier(). */
92959 rdp->qlen_lazy -= count_lazy;
92960- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92961+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92962 rdp->n_cbs_invoked += count;
92963
92964 /* Reinstate batch limit if we have worked down the excess. */
92965@@ -2505,7 +2505,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92966 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92967 return; /* Someone beat us to it. */
92968 }
92969- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92970+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92971 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92972 wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */
92973 }
92974@@ -2550,7 +2550,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92975 /*
92976 * Do RCU core processing for the current CPU.
92977 */
92978-static void rcu_process_callbacks(struct softirq_action *unused)
92979+static void rcu_process_callbacks(void)
92980 {
92981 struct rcu_state *rsp;
92982
92983@@ -2662,7 +2662,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92984 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92985 if (debug_rcu_head_queue(head)) {
92986 /* Probable double call_rcu(), so leak the callback. */
92987- ACCESS_ONCE(head->func) = rcu_leak_callback;
92988+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92989 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92990 return;
92991 }
92992@@ -2690,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92993 local_irq_restore(flags);
92994 return;
92995 }
92996- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92997+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92998 if (lazy)
92999 rdp->qlen_lazy++;
93000 else
93001@@ -2965,11 +2965,11 @@ void synchronize_sched_expedited(void)
93002 * counter wrap on a 32-bit system. Quite a few more CPUs would of
93003 * course be required on a 64-bit system.
93004 */
93005- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
93006+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
93007 (ulong)atomic_long_read(&rsp->expedited_done) +
93008 ULONG_MAX / 8)) {
93009 synchronize_sched();
93010- atomic_long_inc(&rsp->expedited_wrap);
93011+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
93012 return;
93013 }
93014
93015@@ -2977,7 +2977,7 @@ void synchronize_sched_expedited(void)
93016 * Take a ticket. Note that atomic_inc_return() implies a
93017 * full memory barrier.
93018 */
93019- snap = atomic_long_inc_return(&rsp->expedited_start);
93020+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
93021 firstsnap = snap;
93022 get_online_cpus();
93023 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
93024@@ -2990,14 +2990,14 @@ void synchronize_sched_expedited(void)
93025 synchronize_sched_expedited_cpu_stop,
93026 NULL) == -EAGAIN) {
93027 put_online_cpus();
93028- atomic_long_inc(&rsp->expedited_tryfail);
93029+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
93030
93031 /* Check to see if someone else did our work for us. */
93032 s = atomic_long_read(&rsp->expedited_done);
93033 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93034 /* ensure test happens before caller kfree */
93035 smp_mb__before_atomic(); /* ^^^ */
93036- atomic_long_inc(&rsp->expedited_workdone1);
93037+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
93038 return;
93039 }
93040
93041@@ -3006,7 +3006,7 @@ void synchronize_sched_expedited(void)
93042 udelay(trycount * num_online_cpus());
93043 } else {
93044 wait_rcu_gp(call_rcu_sched);
93045- atomic_long_inc(&rsp->expedited_normal);
93046+ atomic_long_inc_unchecked(&rsp->expedited_normal);
93047 return;
93048 }
93049
93050@@ -3015,7 +3015,7 @@ void synchronize_sched_expedited(void)
93051 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
93052 /* ensure test happens before caller kfree */
93053 smp_mb__before_atomic(); /* ^^^ */
93054- atomic_long_inc(&rsp->expedited_workdone2);
93055+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
93056 return;
93057 }
93058
93059@@ -3027,10 +3027,10 @@ void synchronize_sched_expedited(void)
93060 * period works for us.
93061 */
93062 get_online_cpus();
93063- snap = atomic_long_read(&rsp->expedited_start);
93064+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
93065 smp_mb(); /* ensure read is before try_stop_cpus(). */
93066 }
93067- atomic_long_inc(&rsp->expedited_stoppedcpus);
93068+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
93069
93070 /*
93071 * Everyone up to our most recent fetch is covered by our grace
93072@@ -3039,16 +3039,16 @@ void synchronize_sched_expedited(void)
93073 * than we did already did their update.
93074 */
93075 do {
93076- atomic_long_inc(&rsp->expedited_done_tries);
93077+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
93078 s = atomic_long_read(&rsp->expedited_done);
93079 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
93080 /* ensure test happens before caller kfree */
93081 smp_mb__before_atomic(); /* ^^^ */
93082- atomic_long_inc(&rsp->expedited_done_lost);
93083+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
93084 break;
93085 }
93086 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
93087- atomic_long_inc(&rsp->expedited_done_exit);
93088+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
93089
93090 put_online_cpus();
93091 }
93092@@ -3254,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93093 * ACCESS_ONCE() to prevent the compiler from speculating
93094 * the increment to precede the early-exit check.
93095 */
93096- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93097+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93098 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
93099 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
93100 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
93101@@ -3304,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
93102
93103 /* Increment ->n_barrier_done to prevent duplicate work. */
93104 smp_mb(); /* Keep increment after above mechanism. */
93105- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93106+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
93107 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
93108 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
93109 smp_mb(); /* Keep increment before caller's subsequent code. */
93110@@ -3349,10 +3349,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
93111 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
93112 init_callback_list(rdp);
93113 rdp->qlen_lazy = 0;
93114- ACCESS_ONCE(rdp->qlen) = 0;
93115+ ACCESS_ONCE_RW(rdp->qlen) = 0;
93116 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
93117 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
93118- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
93119+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
93120 rdp->cpu = cpu;
93121 rdp->rsp = rsp;
93122 rcu_boot_init_nocb_percpu_data(rdp);
93123@@ -3385,8 +3385,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
93124 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
93125 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
93126 rcu_sysidle_init_percpu_data(rdp->dynticks);
93127- atomic_set(&rdp->dynticks->dynticks,
93128- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
93129+ atomic_set_unchecked(&rdp->dynticks->dynticks,
93130+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
93131 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
93132
93133 /* Add CPU to rcu_node bitmasks. */
93134diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
93135index 6a86eb7..022b506 100644
93136--- a/kernel/rcu/tree.h
93137+++ b/kernel/rcu/tree.h
93138@@ -87,11 +87,11 @@ struct rcu_dynticks {
93139 long long dynticks_nesting; /* Track irq/process nesting level. */
93140 /* Process level is worth LLONG_MAX/2. */
93141 int dynticks_nmi_nesting; /* Track NMI nesting level. */
93142- atomic_t dynticks; /* Even value for idle, else odd. */
93143+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
93144 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
93145 long long dynticks_idle_nesting;
93146 /* irq/process nesting level from idle. */
93147- atomic_t dynticks_idle; /* Even value for idle, else odd. */
93148+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
93149 /* "Idle" excludes userspace execution. */
93150 unsigned long dynticks_idle_jiffies;
93151 /* End of last non-NMI non-idle period. */
93152@@ -461,17 +461,17 @@ struct rcu_state {
93153 /* _rcu_barrier(). */
93154 /* End of fields guarded by barrier_mutex. */
93155
93156- atomic_long_t expedited_start; /* Starting ticket. */
93157- atomic_long_t expedited_done; /* Done ticket. */
93158- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
93159- atomic_long_t expedited_tryfail; /* # acquisition failures. */
93160- atomic_long_t expedited_workdone1; /* # done by others #1. */
93161- atomic_long_t expedited_workdone2; /* # done by others #2. */
93162- atomic_long_t expedited_normal; /* # fallbacks to normal. */
93163- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
93164- atomic_long_t expedited_done_tries; /* # tries to update _done. */
93165- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
93166- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
93167+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
93168+ atomic_long_t expedited_done; /* Done ticket. */
93169+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
93170+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
93171+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
93172+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
93173+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
93174+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
93175+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
93176+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
93177+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
93178
93179 unsigned long jiffies_force_qs; /* Time at which to invoke */
93180 /* force_quiescent_state(). */
93181diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
93182index a7997e2..9787c9e 100644
93183--- a/kernel/rcu/tree_plugin.h
93184+++ b/kernel/rcu/tree_plugin.h
93185@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
93186 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
93187 {
93188 return !rcu_preempted_readers_exp(rnp) &&
93189- ACCESS_ONCE(rnp->expmask) == 0;
93190+ ACCESS_ONCE_RW(rnp->expmask) == 0;
93191 }
93192
93193 /*
93194@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
93195
93196 /* Clean up and exit. */
93197 smp_mb(); /* ensure expedited GP seen before counter increment. */
93198- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
93199+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
93200 unlock_mb_ret:
93201 mutex_unlock(&sync_rcu_preempt_exp_mutex);
93202 mb_ret:
93203@@ -1452,7 +1452,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
93204 free_cpumask_var(cm);
93205 }
93206
93207-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
93208+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
93209 .store = &rcu_cpu_kthread_task,
93210 .thread_should_run = rcu_cpu_kthread_should_run,
93211 .thread_fn = rcu_cpu_kthread,
93212@@ -1932,7 +1932,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
93213 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
93214 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
93215 cpu, ticks_value, ticks_title,
93216- atomic_read(&rdtp->dynticks) & 0xfff,
93217+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
93218 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
93219 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
93220 fast_no_hz);
93221@@ -2076,7 +2076,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
93222 return;
93223 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
93224 /* Prior xchg orders against prior callback enqueue. */
93225- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
93226+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
93227 wake_up(&rdp_leader->nocb_wq);
93228 }
93229 }
93230@@ -2101,7 +2101,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
93231
93232 /* Enqueue the callback on the nocb list and update counts. */
93233 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
93234- ACCESS_ONCE(*old_rhpp) = rhp;
93235+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
93236 atomic_long_add(rhcount, &rdp->nocb_q_count);
93237 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
93238
93239@@ -2272,7 +2272,7 @@ wait_again:
93240 continue; /* No CBs here, try next follower. */
93241
93242 /* Move callbacks to wait-for-GP list, which is empty. */
93243- ACCESS_ONCE(rdp->nocb_head) = NULL;
93244+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
93245 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
93246 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
93247 rdp->nocb_gp_count_lazy =
93248@@ -2398,7 +2398,7 @@ static int rcu_nocb_kthread(void *arg)
93249 list = ACCESS_ONCE(rdp->nocb_follower_head);
93250 BUG_ON(!list);
93251 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
93252- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
93253+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
93254 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
93255 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
93256 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
93257@@ -2428,8 +2428,8 @@ static int rcu_nocb_kthread(void *arg)
93258 list = next;
93259 }
93260 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
93261- ACCESS_ONCE(rdp->nocb_p_count) -= c;
93262- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
93263+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
93264+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
93265 rdp->n_nocbs_invoked += c;
93266 }
93267 return 0;
93268@@ -2446,7 +2446,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
93269 {
93270 if (!rcu_nocb_need_deferred_wakeup(rdp))
93271 return;
93272- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
93273+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
93274 wake_nocb_leader(rdp, false);
93275 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
93276 }
93277@@ -2510,7 +2510,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
93278 t = kthread_run(rcu_nocb_kthread, rdp,
93279 "rcuo%c/%d", rsp->abbr, cpu);
93280 BUG_ON(IS_ERR(t));
93281- ACCESS_ONCE(rdp->nocb_kthread) = t;
93282+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
93283 }
93284 }
93285
93286@@ -2641,11 +2641,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
93287
93288 /* Record start of fully idle period. */
93289 j = jiffies;
93290- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
93291+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
93292 smp_mb__before_atomic();
93293- atomic_inc(&rdtp->dynticks_idle);
93294+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93295 smp_mb__after_atomic();
93296- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
93297+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
93298 }
93299
93300 /*
93301@@ -2710,9 +2710,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
93302
93303 /* Record end of idle period. */
93304 smp_mb__before_atomic();
93305- atomic_inc(&rdtp->dynticks_idle);
93306+ atomic_inc_unchecked(&rdtp->dynticks_idle);
93307 smp_mb__after_atomic();
93308- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
93309+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
93310
93311 /*
93312 * If we are the timekeeping CPU, we are permitted to be non-idle
93313@@ -2753,7 +2753,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
93314 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
93315
93316 /* Pick up current idle and NMI-nesting counter and check. */
93317- cur = atomic_read(&rdtp->dynticks_idle);
93318+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
93319 if (cur & 0x1) {
93320 *isidle = false; /* We are not idle! */
93321 return;
93322@@ -2802,7 +2802,7 @@ static void rcu_sysidle(unsigned long j)
93323 case RCU_SYSIDLE_NOT:
93324
93325 /* First time all are idle, so note a short idle period. */
93326- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93327+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
93328 break;
93329
93330 case RCU_SYSIDLE_SHORT:
93331@@ -2840,7 +2840,7 @@ static void rcu_sysidle_cancel(void)
93332 {
93333 smp_mb();
93334 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
93335- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
93336+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
93337 }
93338
93339 /*
93340@@ -2888,7 +2888,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
93341 smp_mb(); /* grace period precedes setting inuse. */
93342
93343 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
93344- ACCESS_ONCE(rshp->inuse) = 0;
93345+ ACCESS_ONCE_RW(rshp->inuse) = 0;
93346 }
93347
93348 /*
93349diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
93350index 5cdc62e..cc52e88 100644
93351--- a/kernel/rcu/tree_trace.c
93352+++ b/kernel/rcu/tree_trace.c
93353@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
93354 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
93355 rdp->passed_quiesce, rdp->qs_pending);
93356 seq_printf(m, " dt=%d/%llx/%d df=%lu",
93357- atomic_read(&rdp->dynticks->dynticks),
93358+ atomic_read_unchecked(&rdp->dynticks->dynticks),
93359 rdp->dynticks->dynticks_nesting,
93360 rdp->dynticks->dynticks_nmi_nesting,
93361 rdp->dynticks_fqs);
93362@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
93363 struct rcu_state *rsp = (struct rcu_state *)m->private;
93364
93365 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
93366- atomic_long_read(&rsp->expedited_start),
93367+ atomic_long_read_unchecked(&rsp->expedited_start),
93368 atomic_long_read(&rsp->expedited_done),
93369- atomic_long_read(&rsp->expedited_wrap),
93370- atomic_long_read(&rsp->expedited_tryfail),
93371- atomic_long_read(&rsp->expedited_workdone1),
93372- atomic_long_read(&rsp->expedited_workdone2),
93373- atomic_long_read(&rsp->expedited_normal),
93374- atomic_long_read(&rsp->expedited_stoppedcpus),
93375- atomic_long_read(&rsp->expedited_done_tries),
93376- atomic_long_read(&rsp->expedited_done_lost),
93377- atomic_long_read(&rsp->expedited_done_exit));
93378+ atomic_long_read_unchecked(&rsp->expedited_wrap),
93379+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
93380+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
93381+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
93382+ atomic_long_read_unchecked(&rsp->expedited_normal),
93383+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
93384+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
93385+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
93386+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
93387 return 0;
93388 }
93389
93390diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
93391index 4056d79..c11741a 100644
93392--- a/kernel/rcu/update.c
93393+++ b/kernel/rcu/update.c
93394@@ -308,10 +308,10 @@ int rcu_jiffies_till_stall_check(void)
93395 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
93396 */
93397 if (till_stall_check < 3) {
93398- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
93399+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
93400 till_stall_check = 3;
93401 } else if (till_stall_check > 300) {
93402- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
93403+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
93404 till_stall_check = 300;
93405 }
93406 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
93407diff --git a/kernel/resource.c b/kernel/resource.c
93408index 60c5a38..ed77193 100644
93409--- a/kernel/resource.c
93410+++ b/kernel/resource.c
93411@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
93412
93413 static int __init ioresources_init(void)
93414 {
93415+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93416+#ifdef CONFIG_GRKERNSEC_PROC_USER
93417+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93418+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93419+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93420+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93421+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93422+#endif
93423+#else
93424 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93425 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93426+#endif
93427 return 0;
93428 }
93429 __initcall(ioresources_init);
93430diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
93431index e73efba..c9bfbd4 100644
93432--- a/kernel/sched/auto_group.c
93433+++ b/kernel/sched/auto_group.c
93434@@ -11,7 +11,7 @@
93435
93436 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
93437 static struct autogroup autogroup_default;
93438-static atomic_t autogroup_seq_nr;
93439+static atomic_unchecked_t autogroup_seq_nr;
93440
93441 void __init autogroup_init(struct task_struct *init_task)
93442 {
93443@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
93444
93445 kref_init(&ag->kref);
93446 init_rwsem(&ag->lock);
93447- ag->id = atomic_inc_return(&autogroup_seq_nr);
93448+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
93449 ag->tg = tg;
93450 #ifdef CONFIG_RT_GROUP_SCHED
93451 /*
93452diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
93453index a63f4dc..349bbb0 100644
93454--- a/kernel/sched/completion.c
93455+++ b/kernel/sched/completion.c
93456@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
93457 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93458 * or number of jiffies left till timeout) if completed.
93459 */
93460-long __sched
93461+long __sched __intentional_overflow(-1)
93462 wait_for_completion_interruptible_timeout(struct completion *x,
93463 unsigned long timeout)
93464 {
93465@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
93466 *
93467 * Return: -ERESTARTSYS if interrupted, 0 if completed.
93468 */
93469-int __sched wait_for_completion_killable(struct completion *x)
93470+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
93471 {
93472 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
93473 if (t == -ERESTARTSYS)
93474@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
93475 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
93476 * or number of jiffies left till timeout) if completed.
93477 */
93478-long __sched
93479+long __sched __intentional_overflow(-1)
93480 wait_for_completion_killable_timeout(struct completion *x,
93481 unsigned long timeout)
93482 {
93483diff --git a/kernel/sched/core.c b/kernel/sched/core.c
93484index ec1a286..6b516b8 100644
93485--- a/kernel/sched/core.c
93486+++ b/kernel/sched/core.c
93487@@ -1857,7 +1857,7 @@ void set_numabalancing_state(bool enabled)
93488 int sysctl_numa_balancing(struct ctl_table *table, int write,
93489 void __user *buffer, size_t *lenp, loff_t *ppos)
93490 {
93491- struct ctl_table t;
93492+ ctl_table_no_const t;
93493 int err;
93494 int state = numabalancing_enabled;
93495
93496@@ -2320,8 +2320,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
93497 next->active_mm = oldmm;
93498 atomic_inc(&oldmm->mm_count);
93499 enter_lazy_tlb(oldmm, next);
93500- } else
93501+ } else {
93502 switch_mm(oldmm, mm, next);
93503+ populate_stack();
93504+ }
93505
93506 if (!prev->mm) {
93507 prev->active_mm = NULL;
93508@@ -3103,6 +3105,8 @@ int can_nice(const struct task_struct *p, const int nice)
93509 /* convert nice value [19,-20] to rlimit style value [1,40] */
93510 int nice_rlim = nice_to_rlimit(nice);
93511
93512+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
93513+
93514 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
93515 capable(CAP_SYS_NICE));
93516 }
93517@@ -3129,7 +3133,8 @@ SYSCALL_DEFINE1(nice, int, increment)
93518 nice = task_nice(current) + increment;
93519
93520 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
93521- if (increment < 0 && !can_nice(current, nice))
93522+ if (increment < 0 && (!can_nice(current, nice) ||
93523+ gr_handle_chroot_nice()))
93524 return -EPERM;
93525
93526 retval = security_task_setnice(current, nice);
93527@@ -3408,6 +3413,7 @@ recheck:
93528 if (policy != p->policy && !rlim_rtprio)
93529 return -EPERM;
93530
93531+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
93532 /* can't increase priority */
93533 if (attr->sched_priority > p->rt_priority &&
93534 attr->sched_priority > rlim_rtprio)
93535@@ -4797,6 +4803,7 @@ void idle_task_exit(void)
93536
93537 if (mm != &init_mm) {
93538 switch_mm(mm, &init_mm, current);
93539+ populate_stack();
93540 finish_arch_post_lock_switch();
93541 }
93542 mmdrop(mm);
93543@@ -4892,7 +4899,7 @@ static void migrate_tasks(unsigned int dead_cpu)
93544
93545 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
93546
93547-static struct ctl_table sd_ctl_dir[] = {
93548+static ctl_table_no_const sd_ctl_dir[] __read_only = {
93549 {
93550 .procname = "sched_domain",
93551 .mode = 0555,
93552@@ -4909,17 +4916,17 @@ static struct ctl_table sd_ctl_root[] = {
93553 {}
93554 };
93555
93556-static struct ctl_table *sd_alloc_ctl_entry(int n)
93557+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
93558 {
93559- struct ctl_table *entry =
93560+ ctl_table_no_const *entry =
93561 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
93562
93563 return entry;
93564 }
93565
93566-static void sd_free_ctl_entry(struct ctl_table **tablep)
93567+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
93568 {
93569- struct ctl_table *entry;
93570+ ctl_table_no_const *entry;
93571
93572 /*
93573 * In the intermediate directories, both the child directory and
93574@@ -4927,22 +4934,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
93575 * will always be set. In the lowest directory the names are
93576 * static strings and all have proc handlers.
93577 */
93578- for (entry = *tablep; entry->mode; entry++) {
93579- if (entry->child)
93580- sd_free_ctl_entry(&entry->child);
93581+ for (entry = tablep; entry->mode; entry++) {
93582+ if (entry->child) {
93583+ sd_free_ctl_entry(entry->child);
93584+ pax_open_kernel();
93585+ entry->child = NULL;
93586+ pax_close_kernel();
93587+ }
93588 if (entry->proc_handler == NULL)
93589 kfree(entry->procname);
93590 }
93591
93592- kfree(*tablep);
93593- *tablep = NULL;
93594+ kfree(tablep);
93595 }
93596
93597 static int min_load_idx = 0;
93598 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
93599
93600 static void
93601-set_table_entry(struct ctl_table *entry,
93602+set_table_entry(ctl_table_no_const *entry,
93603 const char *procname, void *data, int maxlen,
93604 umode_t mode, proc_handler *proc_handler,
93605 bool load_idx)
93606@@ -4962,7 +4972,7 @@ set_table_entry(struct ctl_table *entry,
93607 static struct ctl_table *
93608 sd_alloc_ctl_domain_table(struct sched_domain *sd)
93609 {
93610- struct ctl_table *table = sd_alloc_ctl_entry(14);
93611+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
93612
93613 if (table == NULL)
93614 return NULL;
93615@@ -5000,9 +5010,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
93616 return table;
93617 }
93618
93619-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
93620+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
93621 {
93622- struct ctl_table *entry, *table;
93623+ ctl_table_no_const *entry, *table;
93624 struct sched_domain *sd;
93625 int domain_num = 0, i;
93626 char buf[32];
93627@@ -5029,11 +5039,13 @@ static struct ctl_table_header *sd_sysctl_header;
93628 static void register_sched_domain_sysctl(void)
93629 {
93630 int i, cpu_num = num_possible_cpus();
93631- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
93632+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
93633 char buf[32];
93634
93635 WARN_ON(sd_ctl_dir[0].child);
93636+ pax_open_kernel();
93637 sd_ctl_dir[0].child = entry;
93638+ pax_close_kernel();
93639
93640 if (entry == NULL)
93641 return;
93642@@ -5056,8 +5068,12 @@ static void unregister_sched_domain_sysctl(void)
93643 if (sd_sysctl_header)
93644 unregister_sysctl_table(sd_sysctl_header);
93645 sd_sysctl_header = NULL;
93646- if (sd_ctl_dir[0].child)
93647- sd_free_ctl_entry(&sd_ctl_dir[0].child);
93648+ if (sd_ctl_dir[0].child) {
93649+ sd_free_ctl_entry(sd_ctl_dir[0].child);
93650+ pax_open_kernel();
93651+ sd_ctl_dir[0].child = NULL;
93652+ pax_close_kernel();
93653+ }
93654 }
93655 #else
93656 static void register_sched_domain_sysctl(void)
93657diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
93658index bfa3c86..e58767c 100644
93659--- a/kernel/sched/fair.c
93660+++ b/kernel/sched/fair.c
93661@@ -1873,7 +1873,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
93662
93663 static void reset_ptenuma_scan(struct task_struct *p)
93664 {
93665- ACCESS_ONCE(p->mm->numa_scan_seq)++;
93666+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
93667 p->mm->numa_scan_offset = 0;
93668 }
93669
93670@@ -7339,7 +7339,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
93671 * run_rebalance_domains is triggered when needed from the scheduler tick.
93672 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
93673 */
93674-static void run_rebalance_domains(struct softirq_action *h)
93675+static __latent_entropy void run_rebalance_domains(void)
93676 {
93677 struct rq *this_rq = this_rq();
93678 enum cpu_idle_type idle = this_rq->idle_balance ?
93679diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
93680index 579712f..a338a9d 100644
93681--- a/kernel/sched/sched.h
93682+++ b/kernel/sched/sched.h
93683@@ -1146,7 +1146,7 @@ struct sched_class {
93684 #ifdef CONFIG_FAIR_GROUP_SCHED
93685 void (*task_move_group) (struct task_struct *p, int on_rq);
93686 #endif
93687-};
93688+} __do_const;
93689
93690 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
93691 {
93692diff --git a/kernel/seccomp.c b/kernel/seccomp.c
93693index 44eb005..84922be 100644
93694--- a/kernel/seccomp.c
93695+++ b/kernel/seccomp.c
93696@@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
93697 if (!filter)
93698 goto free_prog;
93699
93700- filter->prog = kzalloc(bpf_prog_size(new_len),
93701- GFP_KERNEL|__GFP_NOWARN);
93702+ filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
93703 if (!filter->prog)
93704 goto free_filter;
93705
93706 ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
93707 if (ret)
93708 goto free_filter_prog;
93709- kfree(fp);
93710
93711+ kfree(fp);
93712 atomic_set(&filter->usage, 1);
93713 filter->prog->len = new_len;
93714
93715@@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
93716 return filter;
93717
93718 free_filter_prog:
93719- kfree(filter->prog);
93720+ __bpf_prog_free(filter->prog);
93721 free_filter:
93722 kfree(filter);
93723 free_prog:
93724diff --git a/kernel/signal.c b/kernel/signal.c
93725index 8f0876f..1153a5a 100644
93726--- a/kernel/signal.c
93727+++ b/kernel/signal.c
93728@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
93729
93730 int print_fatal_signals __read_mostly;
93731
93732-static void __user *sig_handler(struct task_struct *t, int sig)
93733+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93734 {
93735 return t->sighand->action[sig - 1].sa.sa_handler;
93736 }
93737
93738-static int sig_handler_ignored(void __user *handler, int sig)
93739+static int sig_handler_ignored(__sighandler_t handler, int sig)
93740 {
93741 /* Is it explicitly or implicitly ignored? */
93742 return handler == SIG_IGN ||
93743@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93744
93745 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
93746 {
93747- void __user *handler;
93748+ __sighandler_t handler;
93749
93750 handler = sig_handler(t, sig);
93751
93752@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
93753 atomic_inc(&user->sigpending);
93754 rcu_read_unlock();
93755
93756+ if (!override_rlimit)
93757+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93758+
93759 if (override_rlimit ||
93760 atomic_read(&user->sigpending) <=
93761 task_rlimit(t, RLIMIT_SIGPENDING)) {
93762@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93763
93764 int unhandled_signal(struct task_struct *tsk, int sig)
93765 {
93766- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93767+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93768 if (is_global_init(tsk))
93769 return 1;
93770 if (handler != SIG_IGN && handler != SIG_DFL)
93771@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93772 }
93773 }
93774
93775+ /* allow glibc communication via tgkill to other threads in our
93776+ thread group */
93777+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93778+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93779+ && gr_handle_signal(t, sig))
93780+ return -EPERM;
93781+
93782 return security_task_kill(t, info, sig, 0);
93783 }
93784
93785@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93786 return send_signal(sig, info, p, 1);
93787 }
93788
93789-static int
93790+int
93791 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93792 {
93793 return send_signal(sig, info, t, 0);
93794@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93795 unsigned long int flags;
93796 int ret, blocked, ignored;
93797 struct k_sigaction *action;
93798+ int is_unhandled = 0;
93799
93800 spin_lock_irqsave(&t->sighand->siglock, flags);
93801 action = &t->sighand->action[sig-1];
93802@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93803 }
93804 if (action->sa.sa_handler == SIG_DFL)
93805 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93806+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93807+ is_unhandled = 1;
93808 ret = specific_send_sig_info(sig, info, t);
93809 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93810
93811+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
93812+ normal operation */
93813+ if (is_unhandled) {
93814+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93815+ gr_handle_crash(t, sig);
93816+ }
93817+
93818 return ret;
93819 }
93820
93821@@ -1300,8 +1320,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93822 ret = check_kill_permission(sig, info, p);
93823 rcu_read_unlock();
93824
93825- if (!ret && sig)
93826+ if (!ret && sig) {
93827 ret = do_send_sig_info(sig, info, p, true);
93828+ if (!ret)
93829+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93830+ }
93831
93832 return ret;
93833 }
93834@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93835 int error = -ESRCH;
93836
93837 rcu_read_lock();
93838- p = find_task_by_vpid(pid);
93839+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93840+ /* allow glibc communication via tgkill to other threads in our
93841+ thread group */
93842+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93843+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93844+ p = find_task_by_vpid_unrestricted(pid);
93845+ else
93846+#endif
93847+ p = find_task_by_vpid(pid);
93848 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93849 error = check_kill_permission(sig, info, p);
93850 /*
93851@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93852 }
93853 seg = get_fs();
93854 set_fs(KERNEL_DS);
93855- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93856- (stack_t __force __user *) &uoss,
93857+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93858+ (stack_t __force_user *) &uoss,
93859 compat_user_stack_pointer());
93860 set_fs(seg);
93861 if (ret >= 0 && uoss_ptr) {
93862diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93863index eb89e18..a4e6792 100644
93864--- a/kernel/smpboot.c
93865+++ b/kernel/smpboot.c
93866@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93867 }
93868 smpboot_unpark_thread(plug_thread, cpu);
93869 }
93870- list_add(&plug_thread->list, &hotplug_threads);
93871+ pax_list_add(&plug_thread->list, &hotplug_threads);
93872 out:
93873 mutex_unlock(&smpboot_threads_lock);
93874 return ret;
93875@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93876 {
93877 get_online_cpus();
93878 mutex_lock(&smpboot_threads_lock);
93879- list_del(&plug_thread->list);
93880+ pax_list_del(&plug_thread->list);
93881 smpboot_destroy_threads(plug_thread);
93882 mutex_unlock(&smpboot_threads_lock);
93883 put_online_cpus();
93884diff --git a/kernel/softirq.c b/kernel/softirq.c
93885index 5918d22..e95d1926 100644
93886--- a/kernel/softirq.c
93887+++ b/kernel/softirq.c
93888@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93889 EXPORT_SYMBOL(irq_stat);
93890 #endif
93891
93892-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93893+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93894
93895 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93896
93897@@ -266,7 +266,7 @@ restart:
93898 kstat_incr_softirqs_this_cpu(vec_nr);
93899
93900 trace_softirq_entry(vec_nr);
93901- h->action(h);
93902+ h->action();
93903 trace_softirq_exit(vec_nr);
93904 if (unlikely(prev_count != preempt_count())) {
93905 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93906@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93907 or_softirq_pending(1UL << nr);
93908 }
93909
93910-void open_softirq(int nr, void (*action)(struct softirq_action *))
93911+void __init open_softirq(int nr, void (*action)(void))
93912 {
93913 softirq_vec[nr].action = action;
93914 }
93915@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93916 }
93917 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93918
93919-static void tasklet_action(struct softirq_action *a)
93920+static void tasklet_action(void)
93921 {
93922 struct tasklet_struct *list;
93923
93924@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93925 }
93926 }
93927
93928-static void tasklet_hi_action(struct softirq_action *a)
93929+static __latent_entropy void tasklet_hi_action(void)
93930 {
93931 struct tasklet_struct *list;
93932
93933@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
93934 .notifier_call = cpu_callback
93935 };
93936
93937-static struct smp_hotplug_thread softirq_threads = {
93938+static struct smp_hotplug_thread softirq_threads __read_only = {
93939 .store = &ksoftirqd,
93940 .thread_should_run = ksoftirqd_should_run,
93941 .thread_fn = run_ksoftirqd,
93942diff --git a/kernel/sys.c b/kernel/sys.c
93943index ce81291..df2ca85 100644
93944--- a/kernel/sys.c
93945+++ b/kernel/sys.c
93946@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93947 error = -EACCES;
93948 goto out;
93949 }
93950+
93951+ if (gr_handle_chroot_setpriority(p, niceval)) {
93952+ error = -EACCES;
93953+ goto out;
93954+ }
93955+
93956 no_nice = security_task_setnice(p, niceval);
93957 if (no_nice) {
93958 error = no_nice;
93959@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93960 goto error;
93961 }
93962
93963+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93964+ goto error;
93965+
93966+ if (!gid_eq(new->gid, old->gid)) {
93967+ /* make sure we generate a learn log for what will
93968+ end up being a role transition after a full-learning
93969+ policy is generated
93970+ CAP_SETGID is required to perform a transition
93971+ we may not log a CAP_SETGID check above, e.g.
93972+ in the case where new rgid = old egid
93973+ */
93974+ gr_learn_cap(current, new, CAP_SETGID);
93975+ }
93976+
93977 if (rgid != (gid_t) -1 ||
93978 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93979 new->sgid = new->egid;
93980@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93981 old = current_cred();
93982
93983 retval = -EPERM;
93984+
93985+ if (gr_check_group_change(kgid, kgid, kgid))
93986+ goto error;
93987+
93988 if (ns_capable(old->user_ns, CAP_SETGID))
93989 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93990 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93991@@ -403,7 +427,7 @@ error:
93992 /*
93993 * change the user struct in a credentials set to match the new UID
93994 */
93995-static int set_user(struct cred *new)
93996+int set_user(struct cred *new)
93997 {
93998 struct user_struct *new_user;
93999
94000@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
94001 goto error;
94002 }
94003
94004+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
94005+ goto error;
94006+
94007 if (!uid_eq(new->uid, old->uid)) {
94008+ /* make sure we generate a learn log for what will
94009+ end up being a role transition after a full-learning
94010+ policy is generated
94011+ CAP_SETUID is required to perform a transition
94012+ we may not log a CAP_SETUID check above, e.g.
94013+ in the case where new ruid = old euid
94014+ */
94015+ gr_learn_cap(current, new, CAP_SETUID);
94016 retval = set_user(new);
94017 if (retval < 0)
94018 goto error;
94019@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
94020 old = current_cred();
94021
94022 retval = -EPERM;
94023+
94024+ if (gr_check_crash_uid(kuid))
94025+ goto error;
94026+ if (gr_check_user_change(kuid, kuid, kuid))
94027+ goto error;
94028+
94029 if (ns_capable(old->user_ns, CAP_SETUID)) {
94030 new->suid = new->uid = kuid;
94031 if (!uid_eq(kuid, old->uid)) {
94032@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
94033 goto error;
94034 }
94035
94036+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
94037+ goto error;
94038+
94039 if (ruid != (uid_t) -1) {
94040 new->uid = kruid;
94041 if (!uid_eq(kruid, old->uid)) {
94042@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
94043 goto error;
94044 }
94045
94046+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
94047+ goto error;
94048+
94049 if (rgid != (gid_t) -1)
94050 new->gid = krgid;
94051 if (egid != (gid_t) -1)
94052@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
94053 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
94054 ns_capable(old->user_ns, CAP_SETUID)) {
94055 if (!uid_eq(kuid, old->fsuid)) {
94056+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
94057+ goto error;
94058+
94059 new->fsuid = kuid;
94060 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
94061 goto change_okay;
94062 }
94063 }
94064
94065+error:
94066 abort_creds(new);
94067 return old_fsuid;
94068
94069@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
94070 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
94071 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
94072 ns_capable(old->user_ns, CAP_SETGID)) {
94073+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
94074+ goto error;
94075+
94076 if (!gid_eq(kgid, old->fsgid)) {
94077 new->fsgid = kgid;
94078 goto change_okay;
94079 }
94080 }
94081
94082+error:
94083 abort_creds(new);
94084 return old_fsgid;
94085
94086@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
94087 return -EFAULT;
94088
94089 down_read(&uts_sem);
94090- error = __copy_to_user(&name->sysname, &utsname()->sysname,
94091+ error = __copy_to_user(name->sysname, &utsname()->sysname,
94092 __OLD_UTS_LEN);
94093 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
94094- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
94095+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
94096 __OLD_UTS_LEN);
94097 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
94098- error |= __copy_to_user(&name->release, &utsname()->release,
94099+ error |= __copy_to_user(name->release, &utsname()->release,
94100 __OLD_UTS_LEN);
94101 error |= __put_user(0, name->release + __OLD_UTS_LEN);
94102- error |= __copy_to_user(&name->version, &utsname()->version,
94103+ error |= __copy_to_user(name->version, &utsname()->version,
94104 __OLD_UTS_LEN);
94105 error |= __put_user(0, name->version + __OLD_UTS_LEN);
94106- error |= __copy_to_user(&name->machine, &utsname()->machine,
94107+ error |= __copy_to_user(name->machine, &utsname()->machine,
94108 __OLD_UTS_LEN);
94109 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
94110 up_read(&uts_sem);
94111@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
94112 */
94113 new_rlim->rlim_cur = 1;
94114 }
94115+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
94116+ is changed to a lower value. Since tasks can be created by the same
94117+ user in between this limit change and an execve by this task, force
94118+ a recheck only for this task by setting PF_NPROC_EXCEEDED
94119+ */
94120+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
94121+ tsk->flags |= PF_NPROC_EXCEEDED;
94122 }
94123 if (!retval) {
94124 if (old_rlim)
94125diff --git a/kernel/sysctl.c b/kernel/sysctl.c
94126index 75875a7..cd8e838 100644
94127--- a/kernel/sysctl.c
94128+++ b/kernel/sysctl.c
94129@@ -94,7 +94,6 @@
94130
94131
94132 #if defined(CONFIG_SYSCTL)
94133-
94134 /* External variables not in a header file. */
94135 extern int max_threads;
94136 extern int suid_dumpable;
94137@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
94138
94139 /* Constants used for minimum and maximum */
94140 #ifdef CONFIG_LOCKUP_DETECTOR
94141-static int sixty = 60;
94142+static int sixty __read_only = 60;
94143 #endif
94144
94145-static int __maybe_unused neg_one = -1;
94146+static int __maybe_unused neg_one __read_only = -1;
94147
94148-static int zero;
94149-static int __maybe_unused one = 1;
94150-static int __maybe_unused two = 2;
94151-static int __maybe_unused four = 4;
94152-static unsigned long one_ul = 1;
94153-static int one_hundred = 100;
94154+static int zero __read_only = 0;
94155+static int __maybe_unused one __read_only = 1;
94156+static int __maybe_unused two __read_only = 2;
94157+static int __maybe_unused three __read_only = 3;
94158+static int __maybe_unused four __read_only = 4;
94159+static unsigned long one_ul __read_only = 1;
94160+static int one_hundred __read_only = 100;
94161 #ifdef CONFIG_PRINTK
94162-static int ten_thousand = 10000;
94163+static int ten_thousand __read_only = 10000;
94164 #endif
94165
94166 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
94167@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
94168 void __user *buffer, size_t *lenp, loff_t *ppos);
94169 #endif
94170
94171-#ifdef CONFIG_PRINTK
94172 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94173 void __user *buffer, size_t *lenp, loff_t *ppos);
94174-#endif
94175
94176 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
94177 void __user *buffer, size_t *lenp, loff_t *ppos);
94178@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
94179
94180 #endif
94181
94182+extern struct ctl_table grsecurity_table[];
94183+
94184 static struct ctl_table kern_table[];
94185 static struct ctl_table vm_table[];
94186 static struct ctl_table fs_table[];
94187@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
94188 int sysctl_legacy_va_layout;
94189 #endif
94190
94191+#ifdef CONFIG_PAX_SOFTMODE
94192+static struct ctl_table pax_table[] = {
94193+ {
94194+ .procname = "softmode",
94195+ .data = &pax_softmode,
94196+ .maxlen = sizeof(unsigned int),
94197+ .mode = 0600,
94198+ .proc_handler = &proc_dointvec,
94199+ },
94200+
94201+ { }
94202+};
94203+#endif
94204+
94205 /* The default sysctl tables: */
94206
94207 static struct ctl_table sysctl_base_table[] = {
94208@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
94209 #endif
94210
94211 static struct ctl_table kern_table[] = {
94212+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
94213+ {
94214+ .procname = "grsecurity",
94215+ .mode = 0500,
94216+ .child = grsecurity_table,
94217+ },
94218+#endif
94219+
94220+#ifdef CONFIG_PAX_SOFTMODE
94221+ {
94222+ .procname = "pax",
94223+ .mode = 0500,
94224+ .child = pax_table,
94225+ },
94226+#endif
94227+
94228 {
94229 .procname = "sched_child_runs_first",
94230 .data = &sysctl_sched_child_runs_first,
94231@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
94232 .data = &modprobe_path,
94233 .maxlen = KMOD_PATH_LEN,
94234 .mode = 0644,
94235- .proc_handler = proc_dostring,
94236+ .proc_handler = proc_dostring_modpriv,
94237 },
94238 {
94239 .procname = "modules_disabled",
94240@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
94241 .extra1 = &zero,
94242 .extra2 = &one,
94243 },
94244+#endif
94245 {
94246 .procname = "kptr_restrict",
94247 .data = &kptr_restrict,
94248 .maxlen = sizeof(int),
94249 .mode = 0644,
94250 .proc_handler = proc_dointvec_minmax_sysadmin,
94251+#ifdef CONFIG_GRKERNSEC_HIDESYM
94252+ .extra1 = &two,
94253+#else
94254 .extra1 = &zero,
94255+#endif
94256 .extra2 = &two,
94257 },
94258-#endif
94259 {
94260 .procname = "ngroups_max",
94261 .data = &ngroups_max,
94262@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
94263 */
94264 {
94265 .procname = "perf_event_paranoid",
94266- .data = &sysctl_perf_event_paranoid,
94267- .maxlen = sizeof(sysctl_perf_event_paranoid),
94268+ .data = &sysctl_perf_event_legitimately_concerned,
94269+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
94270 .mode = 0644,
94271- .proc_handler = proc_dointvec,
94272+ /* go ahead, be a hero */
94273+ .proc_handler = proc_dointvec_minmax_sysadmin,
94274+ .extra1 = &neg_one,
94275+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
94276+ .extra2 = &three,
94277+#else
94278+ .extra2 = &two,
94279+#endif
94280 },
94281 {
94282 .procname = "perf_event_mlock_kb",
94283@@ -1335,6 +1376,13 @@ static struct ctl_table vm_table[] = {
94284 .proc_handler = proc_dointvec_minmax,
94285 .extra1 = &zero,
94286 },
94287+ {
94288+ .procname = "heap_stack_gap",
94289+ .data = &sysctl_heap_stack_gap,
94290+ .maxlen = sizeof(sysctl_heap_stack_gap),
94291+ .mode = 0644,
94292+ .proc_handler = proc_doulongvec_minmax,
94293+ },
94294 #else
94295 {
94296 .procname = "nr_trim_pages",
94297@@ -1824,6 +1872,16 @@ int proc_dostring(struct ctl_table *table, int write,
94298 (char __user *)buffer, lenp, ppos);
94299 }
94300
94301+int proc_dostring_modpriv(struct ctl_table *table, int write,
94302+ void __user *buffer, size_t *lenp, loff_t *ppos)
94303+{
94304+ if (write && !capable(CAP_SYS_MODULE))
94305+ return -EPERM;
94306+
94307+ return _proc_do_string(table->data, table->maxlen, write,
94308+ buffer, lenp, ppos);
94309+}
94310+
94311 static size_t proc_skip_spaces(char **buf)
94312 {
94313 size_t ret;
94314@@ -1929,6 +1987,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
94315 len = strlen(tmp);
94316 if (len > *size)
94317 len = *size;
94318+ if (len > sizeof(tmp))
94319+ len = sizeof(tmp);
94320 if (copy_to_user(*buf, tmp, len))
94321 return -EFAULT;
94322 *size -= len;
94323@@ -2106,7 +2166,7 @@ int proc_dointvec(struct ctl_table *table, int write,
94324 static int proc_taint(struct ctl_table *table, int write,
94325 void __user *buffer, size_t *lenp, loff_t *ppos)
94326 {
94327- struct ctl_table t;
94328+ ctl_table_no_const t;
94329 unsigned long tmptaint = get_taint();
94330 int err;
94331
94332@@ -2134,7 +2194,6 @@ static int proc_taint(struct ctl_table *table, int write,
94333 return err;
94334 }
94335
94336-#ifdef CONFIG_PRINTK
94337 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94338 void __user *buffer, size_t *lenp, loff_t *ppos)
94339 {
94340@@ -2143,7 +2202,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
94341
94342 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
94343 }
94344-#endif
94345
94346 struct do_proc_dointvec_minmax_conv_param {
94347 int *min;
94348@@ -2703,6 +2761,12 @@ int proc_dostring(struct ctl_table *table, int write,
94349 return -ENOSYS;
94350 }
94351
94352+int proc_dostring_modpriv(struct ctl_table *table, int write,
94353+ void __user *buffer, size_t *lenp, loff_t *ppos)
94354+{
94355+ return -ENOSYS;
94356+}
94357+
94358 int proc_dointvec(struct ctl_table *table, int write,
94359 void __user *buffer, size_t *lenp, loff_t *ppos)
94360 {
94361@@ -2759,5 +2823,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94362 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94363 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94364 EXPORT_SYMBOL(proc_dostring);
94365+EXPORT_SYMBOL(proc_dostring_modpriv);
94366 EXPORT_SYMBOL(proc_doulongvec_minmax);
94367 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94368diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94369index 13d2f7c..c93d0b0 100644
94370--- a/kernel/taskstats.c
94371+++ b/kernel/taskstats.c
94372@@ -28,9 +28,12 @@
94373 #include <linux/fs.h>
94374 #include <linux/file.h>
94375 #include <linux/pid_namespace.h>
94376+#include <linux/grsecurity.h>
94377 #include <net/genetlink.h>
94378 #include <linux/atomic.h>
94379
94380+extern int gr_is_taskstats_denied(int pid);
94381+
94382 /*
94383 * Maximum length of a cpumask that can be specified in
94384 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94385@@ -576,6 +579,9 @@ err:
94386
94387 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94388 {
94389+ if (gr_is_taskstats_denied(current->pid))
94390+ return -EACCES;
94391+
94392 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
94393 return cmd_attr_register_cpumask(info);
94394 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
94395diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
94396index a7077d3..dd48a49 100644
94397--- a/kernel/time/alarmtimer.c
94398+++ b/kernel/time/alarmtimer.c
94399@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
94400 struct platform_device *pdev;
94401 int error = 0;
94402 int i;
94403- struct k_clock alarm_clock = {
94404+ static struct k_clock alarm_clock = {
94405 .clock_getres = alarm_clock_getres,
94406 .clock_get = alarm_clock_get,
94407 .timer_create = alarm_timer_create,
94408diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
94409index 1c2fe7d..ce7483d 100644
94410--- a/kernel/time/hrtimer.c
94411+++ b/kernel/time/hrtimer.c
94412@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
94413 local_irq_restore(flags);
94414 }
94415
94416-static void run_hrtimer_softirq(struct softirq_action *h)
94417+static __latent_entropy void run_hrtimer_softirq(void)
94418 {
94419 hrtimer_peek_ahead_timers();
94420 }
94421diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
94422index 3b89464..5e38379 100644
94423--- a/kernel/time/posix-cpu-timers.c
94424+++ b/kernel/time/posix-cpu-timers.c
94425@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
94426
94427 static __init int init_posix_cpu_timers(void)
94428 {
94429- struct k_clock process = {
94430+ static struct k_clock process = {
94431 .clock_getres = process_cpu_clock_getres,
94432 .clock_get = process_cpu_clock_get,
94433 .timer_create = process_cpu_timer_create,
94434 .nsleep = process_cpu_nsleep,
94435 .nsleep_restart = process_cpu_nsleep_restart,
94436 };
94437- struct k_clock thread = {
94438+ static struct k_clock thread = {
94439 .clock_getres = thread_cpu_clock_getres,
94440 .clock_get = thread_cpu_clock_get,
94441 .timer_create = thread_cpu_timer_create,
94442diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
94443index 42b463a..a6b008f 100644
94444--- a/kernel/time/posix-timers.c
94445+++ b/kernel/time/posix-timers.c
94446@@ -43,6 +43,7 @@
94447 #include <linux/hash.h>
94448 #include <linux/posix-clock.h>
94449 #include <linux/posix-timers.h>
94450+#include <linux/grsecurity.h>
94451 #include <linux/syscalls.h>
94452 #include <linux/wait.h>
94453 #include <linux/workqueue.h>
94454@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
94455 * which we beg off on and pass to do_sys_settimeofday().
94456 */
94457
94458-static struct k_clock posix_clocks[MAX_CLOCKS];
94459+static struct k_clock *posix_clocks[MAX_CLOCKS];
94460
94461 /*
94462 * These ones are defined below.
94463@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
94464 */
94465 static __init int init_posix_timers(void)
94466 {
94467- struct k_clock clock_realtime = {
94468+ static struct k_clock clock_realtime = {
94469 .clock_getres = hrtimer_get_res,
94470 .clock_get = posix_clock_realtime_get,
94471 .clock_set = posix_clock_realtime_set,
94472@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
94473 .timer_get = common_timer_get,
94474 .timer_del = common_timer_del,
94475 };
94476- struct k_clock clock_monotonic = {
94477+ static struct k_clock clock_monotonic = {
94478 .clock_getres = hrtimer_get_res,
94479 .clock_get = posix_ktime_get_ts,
94480 .nsleep = common_nsleep,
94481@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
94482 .timer_get = common_timer_get,
94483 .timer_del = common_timer_del,
94484 };
94485- struct k_clock clock_monotonic_raw = {
94486+ static struct k_clock clock_monotonic_raw = {
94487 .clock_getres = hrtimer_get_res,
94488 .clock_get = posix_get_monotonic_raw,
94489 };
94490- struct k_clock clock_realtime_coarse = {
94491+ static struct k_clock clock_realtime_coarse = {
94492 .clock_getres = posix_get_coarse_res,
94493 .clock_get = posix_get_realtime_coarse,
94494 };
94495- struct k_clock clock_monotonic_coarse = {
94496+ static struct k_clock clock_monotonic_coarse = {
94497 .clock_getres = posix_get_coarse_res,
94498 .clock_get = posix_get_monotonic_coarse,
94499 };
94500- struct k_clock clock_tai = {
94501+ static struct k_clock clock_tai = {
94502 .clock_getres = hrtimer_get_res,
94503 .clock_get = posix_get_tai,
94504 .nsleep = common_nsleep,
94505@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
94506 .timer_get = common_timer_get,
94507 .timer_del = common_timer_del,
94508 };
94509- struct k_clock clock_boottime = {
94510+ static struct k_clock clock_boottime = {
94511 .clock_getres = hrtimer_get_res,
94512 .clock_get = posix_get_boottime,
94513 .nsleep = common_nsleep,
94514@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
94515 return;
94516 }
94517
94518- posix_clocks[clock_id] = *new_clock;
94519+ posix_clocks[clock_id] = new_clock;
94520 }
94521 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
94522
94523@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
94524 return (id & CLOCKFD_MASK) == CLOCKFD ?
94525 &clock_posix_dynamic : &clock_posix_cpu;
94526
94527- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
94528+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
94529 return NULL;
94530- return &posix_clocks[id];
94531+ return posix_clocks[id];
94532 }
94533
94534 static int common_timer_create(struct k_itimer *new_timer)
94535@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
94536 struct k_clock *kc = clockid_to_kclock(which_clock);
94537 struct k_itimer *new_timer;
94538 int error, new_timer_id;
94539- sigevent_t event;
94540+ sigevent_t event = { };
94541 int it_id_set = IT_ID_NOT_SET;
94542
94543 if (!kc)
94544@@ -1013,6 +1014,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
94545 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
94546 return -EFAULT;
94547
94548+ /* only the CLOCK_REALTIME clock can be set, all other clocks
94549+ have their clock_set fptr set to a nosettime dummy function
94550+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
94551+ call common_clock_set, which calls do_sys_settimeofday, which
94552+ we hook
94553+ */
94554+
94555 return kc->clock_set(which_clock, &new_tp);
94556 }
94557
94558diff --git a/kernel/time/time.c b/kernel/time/time.c
94559index a9ae20f..d3fbde7 100644
94560--- a/kernel/time/time.c
94561+++ b/kernel/time/time.c
94562@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
94563 return error;
94564
94565 if (tz) {
94566+ /* we log in do_settimeofday called below, so don't log twice
94567+ */
94568+ if (!tv)
94569+ gr_log_timechange();
94570+
94571 sys_tz = *tz;
94572 update_vsyscall_tz();
94573 if (firsttime) {
94574diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94575index ec1791f..6a086cd 100644
94576--- a/kernel/time/timekeeping.c
94577+++ b/kernel/time/timekeeping.c
94578@@ -15,6 +15,7 @@
94579 #include <linux/init.h>
94580 #include <linux/mm.h>
94581 #include <linux/sched.h>
94582+#include <linux/grsecurity.h>
94583 #include <linux/syscore_ops.h>
94584 #include <linux/clocksource.h>
94585 #include <linux/jiffies.h>
94586@@ -717,6 +718,8 @@ int do_settimeofday(const struct timespec *tv)
94587 if (!timespec_valid_strict(tv))
94588 return -EINVAL;
94589
94590+ gr_log_timechange();
94591+
94592 raw_spin_lock_irqsave(&timekeeper_lock, flags);
94593 write_seqcount_begin(&tk_core.seq);
94594
94595diff --git a/kernel/time/timer.c b/kernel/time/timer.c
94596index 9bbb834..3caa8ed 100644
94597--- a/kernel/time/timer.c
94598+++ b/kernel/time/timer.c
94599@@ -1394,7 +1394,7 @@ void update_process_times(int user_tick)
94600 /*
94601 * This function runs timers and the timer-tq in bottom half context.
94602 */
94603-static void run_timer_softirq(struct softirq_action *h)
94604+static __latent_entropy void run_timer_softirq(void)
94605 {
94606 struct tvec_base *base = __this_cpu_read(tvec_bases);
94607
94608@@ -1457,7 +1457,7 @@ static void process_timeout(unsigned long __data)
94609 *
94610 * In all cases the return value is guaranteed to be non-negative.
94611 */
94612-signed long __sched schedule_timeout(signed long timeout)
94613+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
94614 {
94615 struct timer_list timer;
94616 unsigned long expire;
94617diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94618index 61ed862..3b52c65 100644
94619--- a/kernel/time/timer_list.c
94620+++ b/kernel/time/timer_list.c
94621@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94622
94623 static void print_name_offset(struct seq_file *m, void *sym)
94624 {
94625+#ifdef CONFIG_GRKERNSEC_HIDESYM
94626+ SEQ_printf(m, "<%p>", NULL);
94627+#else
94628 char symname[KSYM_NAME_LEN];
94629
94630 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94631 SEQ_printf(m, "<%pK>", sym);
94632 else
94633 SEQ_printf(m, "%s", symname);
94634+#endif
94635 }
94636
94637 static void
94638@@ -119,7 +123,11 @@ next_one:
94639 static void
94640 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94641 {
94642+#ifdef CONFIG_GRKERNSEC_HIDESYM
94643+ SEQ_printf(m, " .base: %p\n", NULL);
94644+#else
94645 SEQ_printf(m, " .base: %pK\n", base);
94646+#endif
94647 SEQ_printf(m, " .index: %d\n",
94648 base->index);
94649 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94650@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
94651 {
94652 struct proc_dir_entry *pe;
94653
94654+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94655+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94656+#else
94657 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94658+#endif
94659 if (!pe)
94660 return -ENOMEM;
94661 return 0;
94662diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94663index 1fb08f2..ca4bb1e 100644
94664--- a/kernel/time/timer_stats.c
94665+++ b/kernel/time/timer_stats.c
94666@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94667 static unsigned long nr_entries;
94668 static struct entry entries[MAX_ENTRIES];
94669
94670-static atomic_t overflow_count;
94671+static atomic_unchecked_t overflow_count;
94672
94673 /*
94674 * The entries are in a hash-table, for fast lookup:
94675@@ -140,7 +140,7 @@ static void reset_entries(void)
94676 nr_entries = 0;
94677 memset(entries, 0, sizeof(entries));
94678 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94679- atomic_set(&overflow_count, 0);
94680+ atomic_set_unchecked(&overflow_count, 0);
94681 }
94682
94683 static struct entry *alloc_entry(void)
94684@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94685 if (likely(entry))
94686 entry->count++;
94687 else
94688- atomic_inc(&overflow_count);
94689+ atomic_inc_unchecked(&overflow_count);
94690
94691 out_unlock:
94692 raw_spin_unlock_irqrestore(lock, flags);
94693@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94694
94695 static void print_name_offset(struct seq_file *m, unsigned long addr)
94696 {
94697+#ifdef CONFIG_GRKERNSEC_HIDESYM
94698+ seq_printf(m, "<%p>", NULL);
94699+#else
94700 char symname[KSYM_NAME_LEN];
94701
94702 if (lookup_symbol_name(addr, symname) < 0)
94703- seq_printf(m, "<%p>", (void *)addr);
94704+ seq_printf(m, "<%pK>", (void *)addr);
94705 else
94706 seq_printf(m, "%s", symname);
94707+#endif
94708 }
94709
94710 static int tstats_show(struct seq_file *m, void *v)
94711@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
94712
94713 seq_puts(m, "Timer Stats Version: v0.3\n");
94714 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94715- if (atomic_read(&overflow_count))
94716- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
94717+ if (atomic_read_unchecked(&overflow_count))
94718+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
94719 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
94720
94721 for (i = 0; i < nr_entries; i++) {
94722@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
94723 {
94724 struct proc_dir_entry *pe;
94725
94726+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94727+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94728+#else
94729 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94730+#endif
94731 if (!pe)
94732 return -ENOMEM;
94733 return 0;
94734diff --git a/kernel/torture.c b/kernel/torture.c
94735index d600af2..27a4e9d 100644
94736--- a/kernel/torture.c
94737+++ b/kernel/torture.c
94738@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
94739 mutex_lock(&fullstop_mutex);
94740 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
94741 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
94742- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
94743+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
94744 } else {
94745 pr_warn("Concurrent rmmod and shutdown illegal!\n");
94746 }
94747@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
94748 if (!torture_must_stop()) {
94749 if (stutter > 1) {
94750 schedule_timeout_interruptible(stutter - 1);
94751- ACCESS_ONCE(stutter_pause_test) = 2;
94752+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
94753 }
94754 schedule_timeout_interruptible(1);
94755- ACCESS_ONCE(stutter_pause_test) = 1;
94756+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
94757 }
94758 if (!torture_must_stop())
94759 schedule_timeout_interruptible(stutter);
94760- ACCESS_ONCE(stutter_pause_test) = 0;
94761+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
94762 torture_shutdown_absorb("torture_stutter");
94763 } while (!torture_must_stop());
94764 torture_kthread_stopping("torture_stutter");
94765@@ -645,7 +645,7 @@ bool torture_cleanup(void)
94766 schedule_timeout_uninterruptible(10);
94767 return true;
94768 }
94769- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
94770+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
94771 mutex_unlock(&fullstop_mutex);
94772 torture_shutdown_cleanup();
94773 torture_shuffle_cleanup();
94774diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94775index c1bd4ad..4b861dc 100644
94776--- a/kernel/trace/blktrace.c
94777+++ b/kernel/trace/blktrace.c
94778@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94779 struct blk_trace *bt = filp->private_data;
94780 char buf[16];
94781
94782- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94783+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94784
94785 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94786 }
94787@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94788 return 1;
94789
94790 bt = buf->chan->private_data;
94791- atomic_inc(&bt->dropped);
94792+ atomic_inc_unchecked(&bt->dropped);
94793 return 0;
94794 }
94795
94796@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94797
94798 bt->dir = dir;
94799 bt->dev = dev;
94800- atomic_set(&bt->dropped, 0);
94801+ atomic_set_unchecked(&bt->dropped, 0);
94802 INIT_LIST_HEAD(&bt->running_list);
94803
94804 ret = -EIO;
94805diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94806index 5916a8e..5cd3b1f 100644
94807--- a/kernel/trace/ftrace.c
94808+++ b/kernel/trace/ftrace.c
94809@@ -2128,12 +2128,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94810 if (unlikely(ftrace_disabled))
94811 return 0;
94812
94813+ ret = ftrace_arch_code_modify_prepare();
94814+ FTRACE_WARN_ON(ret);
94815+ if (ret)
94816+ return 0;
94817+
94818 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94819+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94820 if (ret) {
94821 ftrace_bug(ret, ip);
94822- return 0;
94823 }
94824- return 1;
94825+ return ret ? 0 : 1;
94826 }
94827
94828 /*
94829@@ -4458,8 +4463,10 @@ static int ftrace_process_locs(struct module *mod,
94830 if (!count)
94831 return 0;
94832
94833+ pax_open_kernel();
94834 sort(start, count, sizeof(*start),
94835 ftrace_cmp_ips, ftrace_swap_ips);
94836+ pax_close_kernel();
94837
94838 start_pg = ftrace_allocate_pages(count);
94839 if (!start_pg)
94840diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94841index 2d75c94..5ef6d32 100644
94842--- a/kernel/trace/ring_buffer.c
94843+++ b/kernel/trace/ring_buffer.c
94844@@ -352,9 +352,9 @@ struct buffer_data_page {
94845 */
94846 struct buffer_page {
94847 struct list_head list; /* list of buffer pages */
94848- local_t write; /* index for next write */
94849+ local_unchecked_t write; /* index for next write */
94850 unsigned read; /* index for next read */
94851- local_t entries; /* entries on this page */
94852+ local_unchecked_t entries; /* entries on this page */
94853 unsigned long real_end; /* real end of data */
94854 struct buffer_data_page *page; /* Actual data page */
94855 };
94856@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
94857 unsigned long last_overrun;
94858 local_t entries_bytes;
94859 local_t entries;
94860- local_t overrun;
94861- local_t commit_overrun;
94862+ local_unchecked_t overrun;
94863+ local_unchecked_t commit_overrun;
94864 local_t dropped_events;
94865 local_t committing;
94866 local_t commits;
94867@@ -1005,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94868 *
94869 * We add a counter to the write field to denote this.
94870 */
94871- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94872- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94873+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94874+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94875
94876 /*
94877 * Just make sure we have seen our old_write and synchronize
94878@@ -1034,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94879 * cmpxchg to only update if an interrupt did not already
94880 * do it for us. If the cmpxchg fails, we don't care.
94881 */
94882- (void)local_cmpxchg(&next_page->write, old_write, val);
94883- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94884+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94885+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94886
94887 /*
94888 * No need to worry about races with clearing out the commit.
94889@@ -1402,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94890
94891 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94892 {
94893- return local_read(&bpage->entries) & RB_WRITE_MASK;
94894+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94895 }
94896
94897 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94898 {
94899- return local_read(&bpage->write) & RB_WRITE_MASK;
94900+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94901 }
94902
94903 static int
94904@@ -1502,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94905 * bytes consumed in ring buffer from here.
94906 * Increment overrun to account for the lost events.
94907 */
94908- local_add(page_entries, &cpu_buffer->overrun);
94909+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94910 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94911 }
94912
94913@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94914 * it is our responsibility to update
94915 * the counters.
94916 */
94917- local_add(entries, &cpu_buffer->overrun);
94918+ local_add_unchecked(entries, &cpu_buffer->overrun);
94919 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94920
94921 /*
94922@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94923 if (tail == BUF_PAGE_SIZE)
94924 tail_page->real_end = 0;
94925
94926- local_sub(length, &tail_page->write);
94927+ local_sub_unchecked(length, &tail_page->write);
94928 return;
94929 }
94930
94931@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94932 rb_event_set_padding(event);
94933
94934 /* Set the write back to the previous setting */
94935- local_sub(length, &tail_page->write);
94936+ local_sub_unchecked(length, &tail_page->write);
94937 return;
94938 }
94939
94940@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94941
94942 /* Set write to end of buffer */
94943 length = (tail + length) - BUF_PAGE_SIZE;
94944- local_sub(length, &tail_page->write);
94945+ local_sub_unchecked(length, &tail_page->write);
94946 }
94947
94948 /*
94949@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94950 * about it.
94951 */
94952 if (unlikely(next_page == commit_page)) {
94953- local_inc(&cpu_buffer->commit_overrun);
94954+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94955 goto out_reset;
94956 }
94957
94958@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94959 cpu_buffer->tail_page) &&
94960 (cpu_buffer->commit_page ==
94961 cpu_buffer->reader_page))) {
94962- local_inc(&cpu_buffer->commit_overrun);
94963+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94964 goto out_reset;
94965 }
94966 }
94967@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94968 length += RB_LEN_TIME_EXTEND;
94969
94970 tail_page = cpu_buffer->tail_page;
94971- write = local_add_return(length, &tail_page->write);
94972+ write = local_add_return_unchecked(length, &tail_page->write);
94973
94974 /* set write to only the index of the write */
94975 write &= RB_WRITE_MASK;
94976@@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94977 kmemcheck_annotate_bitfield(event, bitfield);
94978 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94979
94980- local_inc(&tail_page->entries);
94981+ local_inc_unchecked(&tail_page->entries);
94982
94983 /*
94984 * If this is the first commit on the page, then update
94985@@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94986
94987 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94988 unsigned long write_mask =
94989- local_read(&bpage->write) & ~RB_WRITE_MASK;
94990+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94991 unsigned long event_length = rb_event_length(event);
94992 /*
94993 * This is on the tail page. It is possible that
94994@@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94995 */
94996 old_index += write_mask;
94997 new_index += write_mask;
94998- index = local_cmpxchg(&bpage->write, old_index, new_index);
94999+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
95000 if (index == old_index) {
95001 /* update counters */
95002 local_sub(event_length, &cpu_buffer->entries_bytes);
95003@@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95004
95005 /* Do the likely case first */
95006 if (likely(bpage->page == (void *)addr)) {
95007- local_dec(&bpage->entries);
95008+ local_dec_unchecked(&bpage->entries);
95009 return;
95010 }
95011
95012@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
95013 start = bpage;
95014 do {
95015 if (bpage->page == (void *)addr) {
95016- local_dec(&bpage->entries);
95017+ local_dec_unchecked(&bpage->entries);
95018 return;
95019 }
95020 rb_inc_page(cpu_buffer, &bpage);
95021@@ -3146,7 +3146,7 @@ static inline unsigned long
95022 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
95023 {
95024 return local_read(&cpu_buffer->entries) -
95025- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
95026+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
95027 }
95028
95029 /**
95030@@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
95031 return 0;
95032
95033 cpu_buffer = buffer->buffers[cpu];
95034- ret = local_read(&cpu_buffer->overrun);
95035+ ret = local_read_unchecked(&cpu_buffer->overrun);
95036
95037 return ret;
95038 }
95039@@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
95040 return 0;
95041
95042 cpu_buffer = buffer->buffers[cpu];
95043- ret = local_read(&cpu_buffer->commit_overrun);
95044+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
95045
95046 return ret;
95047 }
95048@@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
95049 /* if you care about this being correct, lock the buffer */
95050 for_each_buffer_cpu(buffer, cpu) {
95051 cpu_buffer = buffer->buffers[cpu];
95052- overruns += local_read(&cpu_buffer->overrun);
95053+ overruns += local_read_unchecked(&cpu_buffer->overrun);
95054 }
95055
95056 return overruns;
95057@@ -3514,8 +3514,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95058 /*
95059 * Reset the reader page to size zero.
95060 */
95061- local_set(&cpu_buffer->reader_page->write, 0);
95062- local_set(&cpu_buffer->reader_page->entries, 0);
95063+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95064+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95065 local_set(&cpu_buffer->reader_page->page->commit, 0);
95066 cpu_buffer->reader_page->real_end = 0;
95067
95068@@ -3549,7 +3549,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
95069 * want to compare with the last_overrun.
95070 */
95071 smp_mb();
95072- overwrite = local_read(&(cpu_buffer->overrun));
95073+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
95074
95075 /*
95076 * Here's the tricky part.
95077@@ -4121,8 +4121,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95078
95079 cpu_buffer->head_page
95080 = list_entry(cpu_buffer->pages, struct buffer_page, list);
95081- local_set(&cpu_buffer->head_page->write, 0);
95082- local_set(&cpu_buffer->head_page->entries, 0);
95083+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
95084+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
95085 local_set(&cpu_buffer->head_page->page->commit, 0);
95086
95087 cpu_buffer->head_page->read = 0;
95088@@ -4132,14 +4132,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
95089
95090 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
95091 INIT_LIST_HEAD(&cpu_buffer->new_pages);
95092- local_set(&cpu_buffer->reader_page->write, 0);
95093- local_set(&cpu_buffer->reader_page->entries, 0);
95094+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
95095+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
95096 local_set(&cpu_buffer->reader_page->page->commit, 0);
95097 cpu_buffer->reader_page->read = 0;
95098
95099 local_set(&cpu_buffer->entries_bytes, 0);
95100- local_set(&cpu_buffer->overrun, 0);
95101- local_set(&cpu_buffer->commit_overrun, 0);
95102+ local_set_unchecked(&cpu_buffer->overrun, 0);
95103+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
95104 local_set(&cpu_buffer->dropped_events, 0);
95105 local_set(&cpu_buffer->entries, 0);
95106 local_set(&cpu_buffer->committing, 0);
95107@@ -4544,8 +4544,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
95108 rb_init_page(bpage);
95109 bpage = reader->page;
95110 reader->page = *data_page;
95111- local_set(&reader->write, 0);
95112- local_set(&reader->entries, 0);
95113+ local_set_unchecked(&reader->write, 0);
95114+ local_set_unchecked(&reader->entries, 0);
95115 reader->read = 0;
95116 *data_page = bpage;
95117
95118diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
95119index 8a52839..dd6d7c8 100644
95120--- a/kernel/trace/trace.c
95121+++ b/kernel/trace/trace.c
95122@@ -3487,7 +3487,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
95123 return 0;
95124 }
95125
95126-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
95127+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
95128 {
95129 /* do nothing if flag is already set */
95130 if (!!(trace_flags & mask) == !!enabled)
95131diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
95132index 385391f..8d2250f 100644
95133--- a/kernel/trace/trace.h
95134+++ b/kernel/trace/trace.h
95135@@ -1280,7 +1280,7 @@ extern const char *__stop___tracepoint_str[];
95136 void trace_printk_init_buffers(void);
95137 void trace_printk_start_comm(void);
95138 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
95139-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
95140+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
95141
95142 /*
95143 * Normal trace_printk() and friends allocates special buffers
95144diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
95145index 57b67b1..66082a9 100644
95146--- a/kernel/trace/trace_clock.c
95147+++ b/kernel/trace/trace_clock.c
95148@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
95149 return now;
95150 }
95151
95152-static atomic64_t trace_counter;
95153+static atomic64_unchecked_t trace_counter;
95154
95155 /*
95156 * trace_clock_counter(): simply an atomic counter.
95157@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
95158 */
95159 u64 notrace trace_clock_counter(void)
95160 {
95161- return atomic64_add_return(1, &trace_counter);
95162+ return atomic64_inc_return_unchecked(&trace_counter);
95163 }
95164diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
95165index ef06ce7..3ea161d 100644
95166--- a/kernel/trace/trace_events.c
95167+++ b/kernel/trace/trace_events.c
95168@@ -1720,7 +1720,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
95169 return 0;
95170 }
95171
95172-struct ftrace_module_file_ops;
95173 static void __add_event_to_tracers(struct ftrace_event_call *call);
95174
95175 /* Add an additional event_call dynamically */
95176diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
95177index 0abd9b8..6a663a2 100644
95178--- a/kernel/trace/trace_mmiotrace.c
95179+++ b/kernel/trace/trace_mmiotrace.c
95180@@ -24,7 +24,7 @@ struct header_iter {
95181 static struct trace_array *mmio_trace_array;
95182 static bool overrun_detected;
95183 static unsigned long prev_overruns;
95184-static atomic_t dropped_count;
95185+static atomic_unchecked_t dropped_count;
95186
95187 static void mmio_reset_data(struct trace_array *tr)
95188 {
95189@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
95190
95191 static unsigned long count_overruns(struct trace_iterator *iter)
95192 {
95193- unsigned long cnt = atomic_xchg(&dropped_count, 0);
95194+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
95195 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
95196
95197 if (over > prev_overruns)
95198@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
95199 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
95200 sizeof(*entry), 0, pc);
95201 if (!event) {
95202- atomic_inc(&dropped_count);
95203+ atomic_inc_unchecked(&dropped_count);
95204 return;
95205 }
95206 entry = ring_buffer_event_data(event);
95207@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
95208 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
95209 sizeof(*entry), 0, pc);
95210 if (!event) {
95211- atomic_inc(&dropped_count);
95212+ atomic_inc_unchecked(&dropped_count);
95213 return;
95214 }
95215 entry = ring_buffer_event_data(event);
95216diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
95217index c6977d5..d243785 100644
95218--- a/kernel/trace/trace_output.c
95219+++ b/kernel/trace/trace_output.c
95220@@ -712,14 +712,16 @@ int register_ftrace_event(struct trace_event *event)
95221 goto out;
95222 }
95223
95224+ pax_open_kernel();
95225 if (event->funcs->trace == NULL)
95226- event->funcs->trace = trace_nop_print;
95227+ *(void **)&event->funcs->trace = trace_nop_print;
95228 if (event->funcs->raw == NULL)
95229- event->funcs->raw = trace_nop_print;
95230+ *(void **)&event->funcs->raw = trace_nop_print;
95231 if (event->funcs->hex == NULL)
95232- event->funcs->hex = trace_nop_print;
95233+ *(void **)&event->funcs->hex = trace_nop_print;
95234 if (event->funcs->binary == NULL)
95235- event->funcs->binary = trace_nop_print;
95236+ *(void **)&event->funcs->binary = trace_nop_print;
95237+ pax_close_kernel();
95238
95239 key = event->type & (EVENT_HASHSIZE - 1);
95240
95241diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
95242index 1f24ed9..10407ec 100644
95243--- a/kernel/trace/trace_seq.c
95244+++ b/kernel/trace/trace_seq.c
95245@@ -367,7 +367,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
95246
95247 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
95248 if (!IS_ERR(p)) {
95249- p = mangle_path(s->buffer + s->len, p, "\n");
95250+ p = mangle_path(s->buffer + s->len, p, "\n\\");
95251 if (p) {
95252 s->len = p - s->buffer;
95253 return 1;
95254diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
95255index 8a4e5cb..64f270d 100644
95256--- a/kernel/trace/trace_stack.c
95257+++ b/kernel/trace/trace_stack.c
95258@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
95259 return;
95260
95261 /* we do not handle interrupt stacks yet */
95262- if (!object_is_on_stack(stack))
95263+ if (!object_starts_on_stack(stack))
95264 return;
95265
95266 local_irq_save(flags);
95267diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
95268index 759d5e0..5156a5fe 100644
95269--- a/kernel/trace/trace_syscalls.c
95270+++ b/kernel/trace/trace_syscalls.c
95271@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
95272 int size;
95273
95274 syscall_nr = trace_get_syscall_nr(current, regs);
95275- if (syscall_nr < 0)
95276+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
95277 return;
95278
95279 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
95280@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
95281 int syscall_nr;
95282
95283 syscall_nr = trace_get_syscall_nr(current, regs);
95284- if (syscall_nr < 0)
95285+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
95286 return;
95287
95288 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
95289@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
95290 int size;
95291
95292 syscall_nr = trace_get_syscall_nr(current, regs);
95293- if (syscall_nr < 0)
95294+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
95295 return;
95296 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
95297 return;
95298@@ -602,6 +602,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
95299 int num;
95300
95301 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95302+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95303+ return -EINVAL;
95304
95305 mutex_lock(&syscall_trace_lock);
95306 if (!sys_perf_refcount_enter)
95307@@ -622,6 +624,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
95308 int num;
95309
95310 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95311+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95312+ return;
95313
95314 mutex_lock(&syscall_trace_lock);
95315 sys_perf_refcount_enter--;
95316@@ -641,7 +645,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
95317 int size;
95318
95319 syscall_nr = trace_get_syscall_nr(current, regs);
95320- if (syscall_nr < 0)
95321+ if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
95322 return;
95323 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
95324 return;
95325@@ -674,6 +678,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
95326 int num;
95327
95328 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95329+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95330+ return -EINVAL;
95331
95332 mutex_lock(&syscall_trace_lock);
95333 if (!sys_perf_refcount_exit)
95334@@ -694,6 +700,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
95335 int num;
95336
95337 num = ((struct syscall_metadata *)call->data)->syscall_nr;
95338+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
95339+ return;
95340
95341 mutex_lock(&syscall_trace_lock);
95342 sys_perf_refcount_exit--;
95343diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
95344index aa312b0..395f343 100644
95345--- a/kernel/user_namespace.c
95346+++ b/kernel/user_namespace.c
95347@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
95348 !kgid_has_mapping(parent_ns, group))
95349 return -EPERM;
95350
95351+#ifdef CONFIG_GRKERNSEC
95352+ /*
95353+ * This doesn't really inspire confidence:
95354+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
95355+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
95356+ * Increases kernel attack surface in areas developers
95357+ * previously cared little about ("low importance due
95358+ * to requiring "root" capability")
95359+ * To be removed when this code receives *proper* review
95360+ */
95361+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
95362+ !capable(CAP_SETGID))
95363+ return -EPERM;
95364+#endif
95365+
95366 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
95367 if (!ns)
95368 return -ENOMEM;
95369@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
95370 if (atomic_read(&current->mm->mm_users) > 1)
95371 return -EINVAL;
95372
95373- if (current->fs->users != 1)
95374+ if (atomic_read(&current->fs->users) != 1)
95375 return -EINVAL;
95376
95377 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
95378diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
95379index c8eac43..4b5f08f 100644
95380--- a/kernel/utsname_sysctl.c
95381+++ b/kernel/utsname_sysctl.c
95382@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
95383 static int proc_do_uts_string(struct ctl_table *table, int write,
95384 void __user *buffer, size_t *lenp, loff_t *ppos)
95385 {
95386- struct ctl_table uts_table;
95387+ ctl_table_no_const uts_table;
95388 int r;
95389 memcpy(&uts_table, table, sizeof(uts_table));
95390 uts_table.data = get_uts(table, write);
95391diff --git a/kernel/watchdog.c b/kernel/watchdog.c
95392index a8d6914..8fbdb13 100644
95393--- a/kernel/watchdog.c
95394+++ b/kernel/watchdog.c
95395@@ -521,7 +521,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
95396 static void watchdog_nmi_disable(unsigned int cpu) { return; }
95397 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
95398
95399-static struct smp_hotplug_thread watchdog_threads = {
95400+static struct smp_hotplug_thread watchdog_threads __read_only = {
95401 .store = &softlockup_watchdog,
95402 .thread_should_run = watchdog_should_run,
95403 .thread_fn = watchdog,
95404diff --git a/kernel/workqueue.c b/kernel/workqueue.c
95405index 5dbe22a..872413c 100644
95406--- a/kernel/workqueue.c
95407+++ b/kernel/workqueue.c
95408@@ -4507,7 +4507,7 @@ static void rebind_workers(struct worker_pool *pool)
95409 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
95410 worker_flags |= WORKER_REBOUND;
95411 worker_flags &= ~WORKER_UNBOUND;
95412- ACCESS_ONCE(worker->flags) = worker_flags;
95413+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
95414 }
95415
95416 spin_unlock_irq(&pool->lock);
95417diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
95418index a285900..5e3b26b 100644
95419--- a/lib/Kconfig.debug
95420+++ b/lib/Kconfig.debug
95421@@ -882,7 +882,7 @@ config DEBUG_MUTEXES
95422
95423 config DEBUG_WW_MUTEX_SLOWPATH
95424 bool "Wait/wound mutex debugging: Slowpath testing"
95425- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95426+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95427 select DEBUG_LOCK_ALLOC
95428 select DEBUG_SPINLOCK
95429 select DEBUG_MUTEXES
95430@@ -899,7 +899,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
95431
95432 config DEBUG_LOCK_ALLOC
95433 bool "Lock debugging: detect incorrect freeing of live locks"
95434- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95435+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95436 select DEBUG_SPINLOCK
95437 select DEBUG_MUTEXES
95438 select LOCKDEP
95439@@ -913,7 +913,7 @@ config DEBUG_LOCK_ALLOC
95440
95441 config PROVE_LOCKING
95442 bool "Lock debugging: prove locking correctness"
95443- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95444+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95445 select LOCKDEP
95446 select DEBUG_SPINLOCK
95447 select DEBUG_MUTEXES
95448@@ -964,7 +964,7 @@ config LOCKDEP
95449
95450 config LOCK_STAT
95451 bool "Lock usage statistics"
95452- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
95453+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
95454 select LOCKDEP
95455 select DEBUG_SPINLOCK
95456 select DEBUG_MUTEXES
95457@@ -1437,6 +1437,7 @@ config LATENCYTOP
95458 depends on DEBUG_KERNEL
95459 depends on STACKTRACE_SUPPORT
95460 depends on PROC_FS
95461+ depends on !GRKERNSEC_HIDESYM
95462 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
95463 select KALLSYMS
95464 select KALLSYMS_ALL
95465@@ -1453,7 +1454,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95466 config DEBUG_STRICT_USER_COPY_CHECKS
95467 bool "Strict user copy size checks"
95468 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
95469- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
95470+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
95471 help
95472 Enabling this option turns a certain set of sanity checks for user
95473 copy operations into compile time failures.
95474@@ -1581,7 +1582,7 @@ endmenu # runtime tests
95475
95476 config PROVIDE_OHCI1394_DMA_INIT
95477 bool "Remote debugging over FireWire early on boot"
95478- depends on PCI && X86
95479+ depends on PCI && X86 && !GRKERNSEC
95480 help
95481 If you want to debug problems which hang or crash the kernel early
95482 on boot and the crashing machine has a FireWire port, you can use
95483diff --git a/lib/Makefile b/lib/Makefile
95484index d6b4bc4..a3724eb 100644
95485--- a/lib/Makefile
95486+++ b/lib/Makefile
95487@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
95488 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
95489 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
95490 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
95491-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
95492+obj-y += list_debug.o
95493 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
95494
95495 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
95496diff --git a/lib/average.c b/lib/average.c
95497index 114d1be..ab0350c 100644
95498--- a/lib/average.c
95499+++ b/lib/average.c
95500@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
95501 {
95502 unsigned long internal = ACCESS_ONCE(avg->internal);
95503
95504- ACCESS_ONCE(avg->internal) = internal ?
95505+ ACCESS_ONCE_RW(avg->internal) = internal ?
95506 (((internal << avg->weight) - internal) +
95507 (val << avg->factor)) >> avg->weight :
95508 (val << avg->factor);
95509diff --git a/lib/bitmap.c b/lib/bitmap.c
95510index 1e031f2..89e3d6f 100644
95511--- a/lib/bitmap.c
95512+++ b/lib/bitmap.c
95513@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
95514 lower = src[off + k];
95515 if (left && off + k == lim - 1)
95516 lower &= mask;
95517- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
95518+ dst[k] = lower >> rem;
95519+ if (rem)
95520+ dst[k] |= upper << (BITS_PER_LONG - rem);
95521 if (left && k == lim - 1)
95522 dst[k] &= mask;
95523 }
95524@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
95525 upper = src[k];
95526 if (left && k == lim - 1)
95527 upper &= (1UL << left) - 1;
95528- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
95529+ dst[k + off] = upper << rem;
95530+ if (rem)
95531+ dst[k + off] |= lower >> (BITS_PER_LONG - rem);
95532 if (left && k + off == lim - 1)
95533 dst[k + off] &= (1UL << left) - 1;
95534 }
95535@@ -429,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
95536 {
95537 int c, old_c, totaldigits, ndigits, nchunks, nbits;
95538 u32 chunk;
95539- const char __user __force *ubuf = (const char __user __force *)buf;
95540+ const char __user *ubuf = (const char __force_user *)buf;
95541
95542 bitmap_zero(maskp, nmaskbits);
95543
95544@@ -514,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
95545 {
95546 if (!access_ok(VERIFY_READ, ubuf, ulen))
95547 return -EFAULT;
95548- return __bitmap_parse((const char __force *)ubuf,
95549+ return __bitmap_parse((const char __force_kernel *)ubuf,
95550 ulen, 1, maskp, nmaskbits);
95551
95552 }
95553@@ -605,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
95554 {
95555 unsigned a, b;
95556 int c, old_c, totaldigits;
95557- const char __user __force *ubuf = (const char __user __force *)buf;
95558+ const char __user *ubuf = (const char __force_user *)buf;
95559 int exp_digit, in_range;
95560
95561 totaldigits = c = 0;
95562@@ -700,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
95563 {
95564 if (!access_ok(VERIFY_READ, ubuf, ulen))
95565 return -EFAULT;
95566- return __bitmap_parselist((const char __force *)ubuf,
95567+ return __bitmap_parselist((const char __force_kernel *)ubuf,
95568 ulen, 1, maskp, nmaskbits);
95569 }
95570 EXPORT_SYMBOL(bitmap_parselist_user);
95571diff --git a/lib/bug.c b/lib/bug.c
95572index d1d7c78..b354235 100644
95573--- a/lib/bug.c
95574+++ b/lib/bug.c
95575@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
95576 return BUG_TRAP_TYPE_NONE;
95577
95578 bug = find_bug(bugaddr);
95579+ if (!bug)
95580+ return BUG_TRAP_TYPE_NONE;
95581
95582 file = NULL;
95583 line = 0;
95584diff --git a/lib/debugobjects.c b/lib/debugobjects.c
95585index 547f7f9..a6d4ba0 100644
95586--- a/lib/debugobjects.c
95587+++ b/lib/debugobjects.c
95588@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
95589 if (limit > 4)
95590 return;
95591
95592- is_on_stack = object_is_on_stack(addr);
95593+ is_on_stack = object_starts_on_stack(addr);
95594 if (is_on_stack == onstack)
95595 return;
95596
95597diff --git a/lib/div64.c b/lib/div64.c
95598index 4382ad7..08aa558 100644
95599--- a/lib/div64.c
95600+++ b/lib/div64.c
95601@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
95602 EXPORT_SYMBOL(__div64_32);
95603
95604 #ifndef div_s64_rem
95605-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95606+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95607 {
95608 u64 quotient;
95609
95610@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
95611 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
95612 */
95613 #ifndef div64_u64
95614-u64 div64_u64(u64 dividend, u64 divisor)
95615+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
95616 {
95617 u32 high = divisor >> 32;
95618 u64 quot;
95619diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95620index 98f2d7e..899da5c 100644
95621--- a/lib/dma-debug.c
95622+++ b/lib/dma-debug.c
95623@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
95624
95625 void dma_debug_add_bus(struct bus_type *bus)
95626 {
95627- struct notifier_block *nb;
95628+ notifier_block_no_const *nb;
95629
95630 if (global_disable)
95631 return;
95632@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
95633
95634 static void check_for_stack(struct device *dev, void *addr)
95635 {
95636- if (object_is_on_stack(addr))
95637+ if (object_starts_on_stack(addr))
95638 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
95639 "stack [addr=%p]\n", addr);
95640 }
95641diff --git a/lib/hash.c b/lib/hash.c
95642index fea973f..386626f 100644
95643--- a/lib/hash.c
95644+++ b/lib/hash.c
95645@@ -14,7 +14,7 @@
95646 #include <linux/hash.h>
95647 #include <linux/cache.h>
95648
95649-static struct fast_hash_ops arch_hash_ops __read_mostly = {
95650+static struct fast_hash_ops arch_hash_ops __read_only = {
95651 .hash = jhash,
95652 .hash2 = jhash2,
95653 };
95654diff --git a/lib/inflate.c b/lib/inflate.c
95655index 013a761..c28f3fc 100644
95656--- a/lib/inflate.c
95657+++ b/lib/inflate.c
95658@@ -269,7 +269,7 @@ static void free(void *where)
95659 malloc_ptr = free_mem_ptr;
95660 }
95661 #else
95662-#define malloc(a) kmalloc(a, GFP_KERNEL)
95663+#define malloc(a) kmalloc((a), GFP_KERNEL)
95664 #define free(a) kfree(a)
95665 #endif
95666
95667diff --git a/lib/ioremap.c b/lib/ioremap.c
95668index 0c9216c..863bd89 100644
95669--- a/lib/ioremap.c
95670+++ b/lib/ioremap.c
95671@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
95672 unsigned long next;
95673
95674 phys_addr -= addr;
95675- pmd = pmd_alloc(&init_mm, pud, addr);
95676+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95677 if (!pmd)
95678 return -ENOMEM;
95679 do {
95680@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
95681 unsigned long next;
95682
95683 phys_addr -= addr;
95684- pud = pud_alloc(&init_mm, pgd, addr);
95685+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95686 if (!pud)
95687 return -ENOMEM;
95688 do {
95689diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95690index bd2bea9..6b3c95e 100644
95691--- a/lib/is_single_threaded.c
95692+++ b/lib/is_single_threaded.c
95693@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95694 struct task_struct *p, *t;
95695 bool ret;
95696
95697+ if (!mm)
95698+ return true;
95699+
95700 if (atomic_read(&task->signal->live) != 1)
95701 return false;
95702
95703diff --git a/lib/kobject.c b/lib/kobject.c
95704index 58751bb..93a1853 100644
95705--- a/lib/kobject.c
95706+++ b/lib/kobject.c
95707@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
95708
95709
95710 static DEFINE_SPINLOCK(kobj_ns_type_lock);
95711-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
95712+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
95713
95714-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95715+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95716 {
95717 enum kobj_ns_type type = ops->type;
95718 int error;
95719diff --git a/lib/list_debug.c b/lib/list_debug.c
95720index c24c2f7..f0296f4 100644
95721--- a/lib/list_debug.c
95722+++ b/lib/list_debug.c
95723@@ -11,7 +11,9 @@
95724 #include <linux/bug.h>
95725 #include <linux/kernel.h>
95726 #include <linux/rculist.h>
95727+#include <linux/mm.h>
95728
95729+#ifdef CONFIG_DEBUG_LIST
95730 /*
95731 * Insert a new entry between two known consecutive entries.
95732 *
95733@@ -19,21 +21,40 @@
95734 * the prev/next entries already!
95735 */
95736
95737+static bool __list_add_debug(struct list_head *new,
95738+ struct list_head *prev,
95739+ struct list_head *next)
95740+{
95741+ if (unlikely(next->prev != prev)) {
95742+ printk(KERN_ERR "list_add corruption. next->prev should be "
95743+ "prev (%p), but was %p. (next=%p).\n",
95744+ prev, next->prev, next);
95745+ BUG();
95746+ return false;
95747+ }
95748+ if (unlikely(prev->next != next)) {
95749+ printk(KERN_ERR "list_add corruption. prev->next should be "
95750+ "next (%p), but was %p. (prev=%p).\n",
95751+ next, prev->next, prev);
95752+ BUG();
95753+ return false;
95754+ }
95755+ if (unlikely(new == prev || new == next)) {
95756+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
95757+ new, prev, next);
95758+ BUG();
95759+ return false;
95760+ }
95761+ return true;
95762+}
95763+
95764 void __list_add(struct list_head *new,
95765- struct list_head *prev,
95766- struct list_head *next)
95767+ struct list_head *prev,
95768+ struct list_head *next)
95769 {
95770- WARN(next->prev != prev,
95771- "list_add corruption. next->prev should be "
95772- "prev (%p), but was %p. (next=%p).\n",
95773- prev, next->prev, next);
95774- WARN(prev->next != next,
95775- "list_add corruption. prev->next should be "
95776- "next (%p), but was %p. (prev=%p).\n",
95777- next, prev->next, prev);
95778- WARN(new == prev || new == next,
95779- "list_add double add: new=%p, prev=%p, next=%p.\n",
95780- new, prev, next);
95781+ if (!__list_add_debug(new, prev, next))
95782+ return;
95783+
95784 next->prev = new;
95785 new->next = next;
95786 new->prev = prev;
95787@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95788 }
95789 EXPORT_SYMBOL(__list_add);
95790
95791-void __list_del_entry(struct list_head *entry)
95792+static bool __list_del_entry_debug(struct list_head *entry)
95793 {
95794 struct list_head *prev, *next;
95795
95796 prev = entry->prev;
95797 next = entry->next;
95798
95799- if (WARN(next == LIST_POISON1,
95800- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95801- entry, LIST_POISON1) ||
95802- WARN(prev == LIST_POISON2,
95803- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95804- entry, LIST_POISON2) ||
95805- WARN(prev->next != entry,
95806- "list_del corruption. prev->next should be %p, "
95807- "but was %p\n", entry, prev->next) ||
95808- WARN(next->prev != entry,
95809- "list_del corruption. next->prev should be %p, "
95810- "but was %p\n", entry, next->prev))
95811+ if (unlikely(next == LIST_POISON1)) {
95812+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95813+ entry, LIST_POISON1);
95814+ BUG();
95815+ return false;
95816+ }
95817+ if (unlikely(prev == LIST_POISON2)) {
95818+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95819+ entry, LIST_POISON2);
95820+ BUG();
95821+ return false;
95822+ }
95823+ if (unlikely(entry->prev->next != entry)) {
95824+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95825+ "but was %p\n", entry, prev->next);
95826+ BUG();
95827+ return false;
95828+ }
95829+ if (unlikely(entry->next->prev != entry)) {
95830+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95831+ "but was %p\n", entry, next->prev);
95832+ BUG();
95833+ return false;
95834+ }
95835+ return true;
95836+}
95837+
95838+void __list_del_entry(struct list_head *entry)
95839+{
95840+ if (!__list_del_entry_debug(entry))
95841 return;
95842
95843- __list_del(prev, next);
95844+ __list_del(entry->prev, entry->next);
95845 }
95846 EXPORT_SYMBOL(__list_del_entry);
95847
95848@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95849 void __list_add_rcu(struct list_head *new,
95850 struct list_head *prev, struct list_head *next)
95851 {
95852- WARN(next->prev != prev,
95853- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95854- prev, next->prev, next);
95855- WARN(prev->next != next,
95856- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95857- next, prev->next, prev);
95858+ if (!__list_add_debug(new, prev, next))
95859+ return;
95860+
95861 new->next = next;
95862 new->prev = prev;
95863 rcu_assign_pointer(list_next_rcu(prev), new);
95864 next->prev = new;
95865 }
95866 EXPORT_SYMBOL(__list_add_rcu);
95867+#endif
95868+
95869+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95870+{
95871+#ifdef CONFIG_DEBUG_LIST
95872+ if (!__list_add_debug(new, prev, next))
95873+ return;
95874+#endif
95875+
95876+ pax_open_kernel();
95877+ next->prev = new;
95878+ new->next = next;
95879+ new->prev = prev;
95880+ prev->next = new;
95881+ pax_close_kernel();
95882+}
95883+EXPORT_SYMBOL(__pax_list_add);
95884+
95885+void pax_list_del(struct list_head *entry)
95886+{
95887+#ifdef CONFIG_DEBUG_LIST
95888+ if (!__list_del_entry_debug(entry))
95889+ return;
95890+#endif
95891+
95892+ pax_open_kernel();
95893+ __list_del(entry->prev, entry->next);
95894+ entry->next = LIST_POISON1;
95895+ entry->prev = LIST_POISON2;
95896+ pax_close_kernel();
95897+}
95898+EXPORT_SYMBOL(pax_list_del);
95899+
95900+void pax_list_del_init(struct list_head *entry)
95901+{
95902+ pax_open_kernel();
95903+ __list_del(entry->prev, entry->next);
95904+ INIT_LIST_HEAD(entry);
95905+ pax_close_kernel();
95906+}
95907+EXPORT_SYMBOL(pax_list_del_init);
95908+
95909+void __pax_list_add_rcu(struct list_head *new,
95910+ struct list_head *prev, struct list_head *next)
95911+{
95912+#ifdef CONFIG_DEBUG_LIST
95913+ if (!__list_add_debug(new, prev, next))
95914+ return;
95915+#endif
95916+
95917+ pax_open_kernel();
95918+ new->next = next;
95919+ new->prev = prev;
95920+ rcu_assign_pointer(list_next_rcu(prev), new);
95921+ next->prev = new;
95922+ pax_close_kernel();
95923+}
95924+EXPORT_SYMBOL(__pax_list_add_rcu);
95925+
95926+void pax_list_del_rcu(struct list_head *entry)
95927+{
95928+#ifdef CONFIG_DEBUG_LIST
95929+ if (!__list_del_entry_debug(entry))
95930+ return;
95931+#endif
95932+
95933+ pax_open_kernel();
95934+ __list_del(entry->prev, entry->next);
95935+ entry->next = LIST_POISON1;
95936+ entry->prev = LIST_POISON2;
95937+ pax_close_kernel();
95938+}
95939+EXPORT_SYMBOL(pax_list_del_rcu);
95940diff --git a/lib/lockref.c b/lib/lockref.c
95941index d2233de..fa1a2f6 100644
95942--- a/lib/lockref.c
95943+++ b/lib/lockref.c
95944@@ -48,13 +48,13 @@
95945 void lockref_get(struct lockref *lockref)
95946 {
95947 CMPXCHG_LOOP(
95948- new.count++;
95949+ __lockref_inc(&new);
95950 ,
95951 return;
95952 );
95953
95954 spin_lock(&lockref->lock);
95955- lockref->count++;
95956+ __lockref_inc(lockref);
95957 spin_unlock(&lockref->lock);
95958 }
95959 EXPORT_SYMBOL(lockref_get);
95960@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95961 int retval;
95962
95963 CMPXCHG_LOOP(
95964- new.count++;
95965+ __lockref_inc(&new);
95966 if (!old.count)
95967 return 0;
95968 ,
95969@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95970 spin_lock(&lockref->lock);
95971 retval = 0;
95972 if (lockref->count) {
95973- lockref->count++;
95974+ __lockref_inc(lockref);
95975 retval = 1;
95976 }
95977 spin_unlock(&lockref->lock);
95978@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95979 int lockref_get_or_lock(struct lockref *lockref)
95980 {
95981 CMPXCHG_LOOP(
95982- new.count++;
95983+ __lockref_inc(&new);
95984 if (!old.count)
95985 break;
95986 ,
95987@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95988 spin_lock(&lockref->lock);
95989 if (!lockref->count)
95990 return 0;
95991- lockref->count++;
95992+ __lockref_inc(lockref);
95993 spin_unlock(&lockref->lock);
95994 return 1;
95995 }
95996@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95997 int lockref_put_or_lock(struct lockref *lockref)
95998 {
95999 CMPXCHG_LOOP(
96000- new.count--;
96001+ __lockref_dec(&new);
96002 if (old.count <= 1)
96003 break;
96004 ,
96005@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
96006 spin_lock(&lockref->lock);
96007 if (lockref->count <= 1)
96008 return 0;
96009- lockref->count--;
96010+ __lockref_dec(lockref);
96011 spin_unlock(&lockref->lock);
96012 return 1;
96013 }
96014@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
96015 int retval;
96016
96017 CMPXCHG_LOOP(
96018- new.count++;
96019+ __lockref_inc(&new);
96020 if ((int)old.count < 0)
96021 return 0;
96022 ,
96023@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
96024 spin_lock(&lockref->lock);
96025 retval = 0;
96026 if ((int) lockref->count >= 0) {
96027- lockref->count++;
96028+ __lockref_inc(lockref);
96029 retval = 1;
96030 }
96031 spin_unlock(&lockref->lock);
96032diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
96033index a89cf09..1a42c2d 100644
96034--- a/lib/percpu-refcount.c
96035+++ b/lib/percpu-refcount.c
96036@@ -29,7 +29,7 @@
96037 * can't hit 0 before we've added up all the percpu refs.
96038 */
96039
96040-#define PCPU_COUNT_BIAS (1U << 31)
96041+#define PCPU_COUNT_BIAS (1U << 30)
96042
96043 static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
96044 {
96045diff --git a/lib/radix-tree.c b/lib/radix-tree.c
96046index 3291a8e..346a91e 100644
96047--- a/lib/radix-tree.c
96048+++ b/lib/radix-tree.c
96049@@ -67,7 +67,7 @@ struct radix_tree_preload {
96050 int nr;
96051 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
96052 };
96053-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
96054+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
96055
96056 static inline void *ptr_to_indirect(void *ptr)
96057 {
96058diff --git a/lib/random32.c b/lib/random32.c
96059index c9b6bf3..4752c6d4 100644
96060--- a/lib/random32.c
96061+++ b/lib/random32.c
96062@@ -46,7 +46,7 @@ static inline void prandom_state_selftest(void)
96063 }
96064 #endif
96065
96066-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
96067+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
96068
96069 /**
96070 * prandom_u32_state - seeded pseudo-random number generator.
96071diff --git a/lib/rbtree.c b/lib/rbtree.c
96072index c16c81a..4dcbda1 100644
96073--- a/lib/rbtree.c
96074+++ b/lib/rbtree.c
96075@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
96076 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
96077
96078 static const struct rb_augment_callbacks dummy_callbacks = {
96079- dummy_propagate, dummy_copy, dummy_rotate
96080+ .propagate = dummy_propagate,
96081+ .copy = dummy_copy,
96082+ .rotate = dummy_rotate
96083 };
96084
96085 void rb_insert_color(struct rb_node *node, struct rb_root *root)
96086diff --git a/lib/show_mem.c b/lib/show_mem.c
96087index 0922579..9d7adb9 100644
96088--- a/lib/show_mem.c
96089+++ b/lib/show_mem.c
96090@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
96091 quicklist_total_size());
96092 #endif
96093 #ifdef CONFIG_MEMORY_FAILURE
96094- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
96095+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
96096 #endif
96097 }
96098diff --git a/lib/string.c b/lib/string.c
96099index f3c6ff5..70db57a 100644
96100--- a/lib/string.c
96101+++ b/lib/string.c
96102@@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
96103 EXPORT_SYMBOL(memset);
96104 #endif
96105
96106+/**
96107+ * memzero_explicit - Fill a region of memory (e.g. sensitive
96108+ * keying data) with 0s.
96109+ * @s: Pointer to the start of the area.
96110+ * @count: The size of the area.
96111+ *
96112+ * memzero_explicit() doesn't need an arch-specific version as
96113+ * it just invokes the one of memset() implicitly.
96114+ */
96115+void memzero_explicit(void *s, size_t count)
96116+{
96117+ memset(s, 0, count);
96118+ OPTIMIZER_HIDE_VAR(s);
96119+}
96120+EXPORT_SYMBOL(memzero_explicit);
96121+
96122 #ifndef __HAVE_ARCH_MEMCPY
96123 /**
96124 * memcpy - Copy one area of memory to another
96125diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
96126index bb2b201..46abaf9 100644
96127--- a/lib/strncpy_from_user.c
96128+++ b/lib/strncpy_from_user.c
96129@@ -21,7 +21,7 @@
96130 */
96131 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
96132 {
96133- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96134+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96135 long res = 0;
96136
96137 /*
96138diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
96139index a28df52..3d55877 100644
96140--- a/lib/strnlen_user.c
96141+++ b/lib/strnlen_user.c
96142@@ -26,7 +26,7 @@
96143 */
96144 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
96145 {
96146- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96147+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
96148 long align, res = 0;
96149 unsigned long c;
96150
96151diff --git a/lib/swiotlb.c b/lib/swiotlb.c
96152index 4abda07..b9d3765 100644
96153--- a/lib/swiotlb.c
96154+++ b/lib/swiotlb.c
96155@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
96156
96157 void
96158 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
96159- dma_addr_t dev_addr)
96160+ dma_addr_t dev_addr, struct dma_attrs *attrs)
96161 {
96162 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
96163
96164diff --git a/lib/test_bpf.c b/lib/test_bpf.c
96165index 89e0345..3347efe 100644
96166--- a/lib/test_bpf.c
96167+++ b/lib/test_bpf.c
96168@@ -1798,7 +1798,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
96169 break;
96170
96171 case INTERNAL:
96172- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
96173+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
96174 if (fp == NULL) {
96175 pr_cont("UNEXPECTED_FAIL no memory left\n");
96176 *err = -ENOMEM;
96177diff --git a/lib/usercopy.c b/lib/usercopy.c
96178index 4f5b1dd..7cab418 100644
96179--- a/lib/usercopy.c
96180+++ b/lib/usercopy.c
96181@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
96182 WARN(1, "Buffer overflow detected!\n");
96183 }
96184 EXPORT_SYMBOL(copy_from_user_overflow);
96185+
96186+void copy_to_user_overflow(void)
96187+{
96188+ WARN(1, "Buffer overflow detected!\n");
96189+}
96190+EXPORT_SYMBOL(copy_to_user_overflow);
96191diff --git a/lib/vsprintf.c b/lib/vsprintf.c
96192index 6fe2c84..2fe5ec6 100644
96193--- a/lib/vsprintf.c
96194+++ b/lib/vsprintf.c
96195@@ -16,6 +16,9 @@
96196 * - scnprintf and vscnprintf
96197 */
96198
96199+#ifdef CONFIG_GRKERNSEC_HIDESYM
96200+#define __INCLUDED_BY_HIDESYM 1
96201+#endif
96202 #include <stdarg.h>
96203 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
96204 #include <linux/types.h>
96205@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
96206 #ifdef CONFIG_KALLSYMS
96207 if (*fmt == 'B')
96208 sprint_backtrace(sym, value);
96209- else if (*fmt != 'f' && *fmt != 's')
96210+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
96211 sprint_symbol(sym, value);
96212 else
96213 sprint_symbol_no_offset(sym, value);
96214@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
96215 return number(buf, end, num, spec);
96216 }
96217
96218+#ifdef CONFIG_GRKERNSEC_HIDESYM
96219+int kptr_restrict __read_mostly = 2;
96220+#else
96221 int kptr_restrict __read_mostly;
96222+#endif
96223
96224 /*
96225 * Show a '%p' thing. A kernel extension is that the '%p' is followed
96226@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
96227 *
96228 * - 'F' For symbolic function descriptor pointers with offset
96229 * - 'f' For simple symbolic function names without offset
96230+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
96231 * - 'S' For symbolic direct pointers with offset
96232 * - 's' For symbolic direct pointers without offset
96233+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
96234 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
96235 * - 'B' For backtraced symbolic direct pointers with offset
96236 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
96237@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96238
96239 if (!ptr && *fmt != 'K') {
96240 /*
96241- * Print (null) with the same width as a pointer so it makes
96242+ * Print (nil) with the same width as a pointer so it makes
96243 * tabular output look nice.
96244 */
96245 if (spec.field_width == -1)
96246 spec.field_width = default_width;
96247- return string(buf, end, "(null)", spec);
96248+ return string(buf, end, "(nil)", spec);
96249 }
96250
96251 switch (*fmt) {
96252@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96253 /* Fallthrough */
96254 case 'S':
96255 case 's':
96256+#ifdef CONFIG_GRKERNSEC_HIDESYM
96257+ break;
96258+#else
96259+ return symbol_string(buf, end, ptr, spec, fmt);
96260+#endif
96261+ case 'X':
96262+ ptr = dereference_function_descriptor(ptr);
96263+ case 'A':
96264 case 'B':
96265 return symbol_string(buf, end, ptr, spec, fmt);
96266 case 'R':
96267@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96268 va_end(va);
96269 return buf;
96270 }
96271+ case 'P':
96272+ break;
96273 case 'K':
96274 /*
96275 * %pK cannot be used in IRQ context because its test
96276@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
96277 ((const struct file *)ptr)->f_path.dentry,
96278 spec, fmt);
96279 }
96280+
96281+#ifdef CONFIG_GRKERNSEC_HIDESYM
96282+ /* 'P' = approved pointers to copy to userland,
96283+ as in the /proc/kallsyms case, as we make it display nothing
96284+ for non-root users, and the real contents for root users
96285+ 'X' = approved simple symbols
96286+ Also ignore 'K' pointers, since we force their NULLing for non-root users
96287+ above
96288+ */
96289+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
96290+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
96291+ dump_stack();
96292+ ptr = NULL;
96293+ }
96294+#endif
96295+
96296 spec.flags |= SMALL;
96297 if (spec.field_width == -1) {
96298 spec.field_width = default_width;
96299@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96300 typeof(type) value; \
96301 if (sizeof(type) == 8) { \
96302 args = PTR_ALIGN(args, sizeof(u32)); \
96303- *(u32 *)&value = *(u32 *)args; \
96304- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
96305+ *(u32 *)&value = *(const u32 *)args; \
96306+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
96307 } else { \
96308 args = PTR_ALIGN(args, sizeof(type)); \
96309- value = *(typeof(type) *)args; \
96310+ value = *(const typeof(type) *)args; \
96311 } \
96312 args += sizeof(type); \
96313 value; \
96314@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
96315 case FORMAT_TYPE_STR: {
96316 const char *str_arg = args;
96317 args += strlen(str_arg) + 1;
96318- str = string(str, end, (char *)str_arg, spec);
96319+ str = string(str, end, str_arg, spec);
96320 break;
96321 }
96322
96323diff --git a/localversion-grsec b/localversion-grsec
96324new file mode 100644
96325index 0000000..7cd6065
96326--- /dev/null
96327+++ b/localversion-grsec
96328@@ -0,0 +1 @@
96329+-grsec
96330diff --git a/mm/Kconfig b/mm/Kconfig
96331index 886db21..f514de2 100644
96332--- a/mm/Kconfig
96333+++ b/mm/Kconfig
96334@@ -333,10 +333,11 @@ config KSM
96335 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
96336
96337 config DEFAULT_MMAP_MIN_ADDR
96338- int "Low address space to protect from user allocation"
96339+ int "Low address space to protect from user allocation"
96340 depends on MMU
96341- default 4096
96342- help
96343+ default 32768 if ALPHA || ARM || PARISC || SPARC32
96344+ default 65536
96345+ help
96346 This is the portion of low virtual memory which should be protected
96347 from userspace allocation. Keeping a user from writing to low pages
96348 can help reduce the impact of kernel NULL pointer bugs.
96349@@ -367,7 +368,7 @@ config MEMORY_FAILURE
96350
96351 config HWPOISON_INJECT
96352 tristate "HWPoison pages injector"
96353- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
96354+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
96355 select PROC_PAGE_MONITOR
96356
96357 config NOMMU_INITIAL_TRIM_EXCESS
96358diff --git a/mm/backing-dev.c b/mm/backing-dev.c
96359index 1706cbb..f89dbca 100644
96360--- a/mm/backing-dev.c
96361+++ b/mm/backing-dev.c
96362@@ -12,7 +12,7 @@
96363 #include <linux/device.h>
96364 #include <trace/events/writeback.h>
96365
96366-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
96367+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
96368
96369 struct backing_dev_info default_backing_dev_info = {
96370 .name = "default",
96371@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
96372 return err;
96373
96374 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
96375- atomic_long_inc_return(&bdi_seq));
96376+ atomic_long_inc_return_unchecked(&bdi_seq));
96377 if (err) {
96378 bdi_destroy(bdi);
96379 return err;
96380diff --git a/mm/filemap.c b/mm/filemap.c
96381index 90effcd..539aa64 100644
96382--- a/mm/filemap.c
96383+++ b/mm/filemap.c
96384@@ -2092,7 +2092,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
96385 struct address_space *mapping = file->f_mapping;
96386
96387 if (!mapping->a_ops->readpage)
96388- return -ENOEXEC;
96389+ return -ENODEV;
96390 file_accessed(file);
96391 vma->vm_ops = &generic_file_vm_ops;
96392 return 0;
96393@@ -2270,6 +2270,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
96394 *pos = i_size_read(inode);
96395
96396 if (limit != RLIM_INFINITY) {
96397+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
96398 if (*pos >= limit) {
96399 send_sig(SIGXFSZ, current, 0);
96400 return -EFBIG;
96401diff --git a/mm/fremap.c b/mm/fremap.c
96402index 72b8fa3..c5b39f1 100644
96403--- a/mm/fremap.c
96404+++ b/mm/fremap.c
96405@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
96406 retry:
96407 vma = find_vma(mm, start);
96408
96409+#ifdef CONFIG_PAX_SEGMEXEC
96410+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
96411+ goto out;
96412+#endif
96413+
96414 /*
96415 * Make sure the vma is shared, that it supports prefaulting,
96416 * and that the remapped range is valid and fully within
96417diff --git a/mm/gup.c b/mm/gup.c
96418index 91d044b..a58ecf6 100644
96419--- a/mm/gup.c
96420+++ b/mm/gup.c
96421@@ -270,11 +270,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
96422 unsigned int fault_flags = 0;
96423 int ret;
96424
96425- /* For mlock, just skip the stack guard page. */
96426- if ((*flags & FOLL_MLOCK) &&
96427- (stack_guard_page_start(vma, address) ||
96428- stack_guard_page_end(vma, address + PAGE_SIZE)))
96429- return -ENOENT;
96430 if (*flags & FOLL_WRITE)
96431 fault_flags |= FAULT_FLAG_WRITE;
96432 if (nonblocking)
96433@@ -436,14 +431,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96434 if (!(gup_flags & FOLL_FORCE))
96435 gup_flags |= FOLL_NUMA;
96436
96437- do {
96438+ while (nr_pages) {
96439 struct page *page;
96440 unsigned int foll_flags = gup_flags;
96441 unsigned int page_increm;
96442
96443 /* first iteration or cross vma bound */
96444 if (!vma || start >= vma->vm_end) {
96445- vma = find_extend_vma(mm, start);
96446+ vma = find_vma(mm, start);
96447 if (!vma && in_gate_area(mm, start)) {
96448 int ret;
96449 ret = get_gate_page(mm, start & PAGE_MASK,
96450@@ -455,7 +450,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
96451 goto next_page;
96452 }
96453
96454- if (!vma || check_vma_flags(vma, gup_flags))
96455+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
96456 return i ? : -EFAULT;
96457 if (is_vm_hugetlb_page(vma)) {
96458 i = follow_hugetlb_page(mm, vma, pages, vmas,
96459@@ -510,7 +505,7 @@ next_page:
96460 i += page_increm;
96461 start += page_increm * PAGE_SIZE;
96462 nr_pages -= page_increm;
96463- } while (nr_pages);
96464+ }
96465 return i;
96466 }
96467 EXPORT_SYMBOL(__get_user_pages);
96468diff --git a/mm/highmem.c b/mm/highmem.c
96469index 123bcd3..0de52ba 100644
96470--- a/mm/highmem.c
96471+++ b/mm/highmem.c
96472@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
96473 * So no dangers, even with speculative execution.
96474 */
96475 page = pte_page(pkmap_page_table[i]);
96476+ pax_open_kernel();
96477 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
96478-
96479+ pax_close_kernel();
96480 set_page_address(page, NULL);
96481 need_flush = 1;
96482 }
96483@@ -259,9 +260,11 @@ start:
96484 }
96485 }
96486 vaddr = PKMAP_ADDR(last_pkmap_nr);
96487+
96488+ pax_open_kernel();
96489 set_pte_at(&init_mm, vaddr,
96490 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
96491-
96492+ pax_close_kernel();
96493 pkmap_count[last_pkmap_nr] = 1;
96494 set_page_address(page, (void *)vaddr);
96495
96496diff --git a/mm/hugetlb.c b/mm/hugetlb.c
96497index eeceeeb..a209d58 100644
96498--- a/mm/hugetlb.c
96499+++ b/mm/hugetlb.c
96500@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96501 struct ctl_table *table, int write,
96502 void __user *buffer, size_t *length, loff_t *ppos)
96503 {
96504+ ctl_table_no_const t;
96505 struct hstate *h = &default_hstate;
96506 unsigned long tmp = h->max_huge_pages;
96507 int ret;
96508@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
96509 if (!hugepages_supported())
96510 return -ENOTSUPP;
96511
96512- table->data = &tmp;
96513- table->maxlen = sizeof(unsigned long);
96514- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96515+ t = *table;
96516+ t.data = &tmp;
96517+ t.maxlen = sizeof(unsigned long);
96518+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
96519 if (ret)
96520 goto out;
96521
96522@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96523 struct hstate *h = &default_hstate;
96524 unsigned long tmp;
96525 int ret;
96526+ ctl_table_no_const hugetlb_table;
96527
96528 if (!hugepages_supported())
96529 return -ENOTSUPP;
96530@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
96531 if (write && hstate_is_gigantic(h))
96532 return -EINVAL;
96533
96534- table->data = &tmp;
96535- table->maxlen = sizeof(unsigned long);
96536- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
96537+ hugetlb_table = *table;
96538+ hugetlb_table.data = &tmp;
96539+ hugetlb_table.maxlen = sizeof(unsigned long);
96540+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
96541 if (ret)
96542 goto out;
96543
96544@@ -2792,6 +2796,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
96545 mutex_unlock(&mapping->i_mmap_mutex);
96546 }
96547
96548+#ifdef CONFIG_PAX_SEGMEXEC
96549+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
96550+{
96551+ struct mm_struct *mm = vma->vm_mm;
96552+ struct vm_area_struct *vma_m;
96553+ unsigned long address_m;
96554+ pte_t *ptep_m;
96555+
96556+ vma_m = pax_find_mirror_vma(vma);
96557+ if (!vma_m)
96558+ return;
96559+
96560+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96561+ address_m = address + SEGMEXEC_TASK_SIZE;
96562+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
96563+ get_page(page_m);
96564+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
96565+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
96566+}
96567+#endif
96568+
96569 /*
96570 * Hugetlb_cow() should be called with page lock of the original hugepage held.
96571 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
96572@@ -2903,6 +2928,11 @@ retry_avoidcopy:
96573 make_huge_pte(vma, new_page, 1));
96574 page_remove_rmap(old_page);
96575 hugepage_add_new_anon_rmap(new_page, vma, address);
96576+
96577+#ifdef CONFIG_PAX_SEGMEXEC
96578+ pax_mirror_huge_pte(vma, address, new_page);
96579+#endif
96580+
96581 /* Make the old page be freed below */
96582 new_page = old_page;
96583 }
96584@@ -3063,6 +3093,10 @@ retry:
96585 && (vma->vm_flags & VM_SHARED)));
96586 set_huge_pte_at(mm, address, ptep, new_pte);
96587
96588+#ifdef CONFIG_PAX_SEGMEXEC
96589+ pax_mirror_huge_pte(vma, address, page);
96590+#endif
96591+
96592 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
96593 /* Optimization, do the COW without a second fault */
96594 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
96595@@ -3129,6 +3163,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96596 struct hstate *h = hstate_vma(vma);
96597 struct address_space *mapping;
96598
96599+#ifdef CONFIG_PAX_SEGMEXEC
96600+ struct vm_area_struct *vma_m;
96601+#endif
96602+
96603 address &= huge_page_mask(h);
96604
96605 ptep = huge_pte_offset(mm, address);
96606@@ -3142,6 +3180,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96607 VM_FAULT_SET_HINDEX(hstate_index(h));
96608 }
96609
96610+#ifdef CONFIG_PAX_SEGMEXEC
96611+ vma_m = pax_find_mirror_vma(vma);
96612+ if (vma_m) {
96613+ unsigned long address_m;
96614+
96615+ if (vma->vm_start > vma_m->vm_start) {
96616+ address_m = address;
96617+ address -= SEGMEXEC_TASK_SIZE;
96618+ vma = vma_m;
96619+ h = hstate_vma(vma);
96620+ } else
96621+ address_m = address + SEGMEXEC_TASK_SIZE;
96622+
96623+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
96624+ return VM_FAULT_OOM;
96625+ address_m &= HPAGE_MASK;
96626+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
96627+ }
96628+#endif
96629+
96630 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
96631 if (!ptep)
96632 return VM_FAULT_OOM;
96633diff --git a/mm/internal.h b/mm/internal.h
96634index a1b651b..f688570 100644
96635--- a/mm/internal.h
96636+++ b/mm/internal.h
96637@@ -109,6 +109,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
96638 * in mm/page_alloc.c
96639 */
96640 extern void __free_pages_bootmem(struct page *page, unsigned int order);
96641+extern void free_compound_page(struct page *page);
96642 extern void prep_compound_page(struct page *page, unsigned long order);
96643 #ifdef CONFIG_MEMORY_FAILURE
96644 extern bool is_free_buddy_page(struct page *page);
96645@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
96646
96647 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
96648 unsigned long, unsigned long,
96649- unsigned long, unsigned long);
96650+ unsigned long, unsigned long) __intentional_overflow(-1);
96651
96652 extern void set_pageblock_order(void);
96653 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
96654diff --git a/mm/iov_iter.c b/mm/iov_iter.c
96655index 9a09f20..6ef0515 100644
96656--- a/mm/iov_iter.c
96657+++ b/mm/iov_iter.c
96658@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
96659
96660 while (bytes) {
96661 char __user *buf = iov->iov_base + base;
96662- int copy = min(bytes, iov->iov_len - base);
96663+ size_t copy = min(bytes, iov->iov_len - base);
96664
96665 base = 0;
96666 left = __copy_from_user_inatomic(vaddr, buf, copy);
96667@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
96668
96669 kaddr = kmap_atomic(page);
96670 if (likely(i->nr_segs == 1)) {
96671- int left;
96672+ size_t left;
96673 char __user *buf = i->iov->iov_base + i->iov_offset;
96674 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
96675 copied = bytes - left;
96676@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
96677 * zero-length segments (without overruning the iovec).
96678 */
96679 while (bytes || unlikely(i->count && !iov->iov_len)) {
96680- int copy;
96681+ size_t copy;
96682
96683 copy = min(bytes, iov->iov_len - base);
96684 BUG_ON(!i->count || i->count < copy);
96685diff --git a/mm/kmemleak.c b/mm/kmemleak.c
96686index 3cda50c..032ba634 100644
96687--- a/mm/kmemleak.c
96688+++ b/mm/kmemleak.c
96689@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
96690
96691 for (i = 0; i < object->trace_len; i++) {
96692 void *ptr = (void *)object->trace[i];
96693- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
96694+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
96695 }
96696 }
96697
96698@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
96699 return -ENOMEM;
96700 }
96701
96702- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
96703+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
96704 &kmemleak_fops);
96705 if (!dentry)
96706 pr_warning("Failed to create the debugfs kmemleak file\n");
96707diff --git a/mm/maccess.c b/mm/maccess.c
96708index d53adf9..03a24bf 100644
96709--- a/mm/maccess.c
96710+++ b/mm/maccess.c
96711@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
96712 set_fs(KERNEL_DS);
96713 pagefault_disable();
96714 ret = __copy_from_user_inatomic(dst,
96715- (__force const void __user *)src, size);
96716+ (const void __force_user *)src, size);
96717 pagefault_enable();
96718 set_fs(old_fs);
96719
96720@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
96721
96722 set_fs(KERNEL_DS);
96723 pagefault_disable();
96724- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96725+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96726 pagefault_enable();
96727 set_fs(old_fs);
96728
96729diff --git a/mm/madvise.c b/mm/madvise.c
96730index 0938b30..199abe8 100644
96731--- a/mm/madvise.c
96732+++ b/mm/madvise.c
96733@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
96734 pgoff_t pgoff;
96735 unsigned long new_flags = vma->vm_flags;
96736
96737+#ifdef CONFIG_PAX_SEGMEXEC
96738+ struct vm_area_struct *vma_m;
96739+#endif
96740+
96741 switch (behavior) {
96742 case MADV_NORMAL:
96743 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96744@@ -126,6 +130,13 @@ success:
96745 /*
96746 * vm_flags is protected by the mmap_sem held in write mode.
96747 */
96748+
96749+#ifdef CONFIG_PAX_SEGMEXEC
96750+ vma_m = pax_find_mirror_vma(vma);
96751+ if (vma_m)
96752+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96753+#endif
96754+
96755 vma->vm_flags = new_flags;
96756
96757 out:
96758@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96759 struct vm_area_struct **prev,
96760 unsigned long start, unsigned long end)
96761 {
96762+
96763+#ifdef CONFIG_PAX_SEGMEXEC
96764+ struct vm_area_struct *vma_m;
96765+#endif
96766+
96767 *prev = vma;
96768 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96769 return -EINVAL;
96770@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96771 zap_page_range(vma, start, end - start, &details);
96772 } else
96773 zap_page_range(vma, start, end - start, NULL);
96774+
96775+#ifdef CONFIG_PAX_SEGMEXEC
96776+ vma_m = pax_find_mirror_vma(vma);
96777+ if (vma_m) {
96778+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
96779+ struct zap_details details = {
96780+ .nonlinear_vma = vma_m,
96781+ .last_index = ULONG_MAX,
96782+ };
96783+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
96784+ } else
96785+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96786+ }
96787+#endif
96788+
96789 return 0;
96790 }
96791
96792@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96793 if (end < start)
96794 return error;
96795
96796+#ifdef CONFIG_PAX_SEGMEXEC
96797+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96798+ if (end > SEGMEXEC_TASK_SIZE)
96799+ return error;
96800+ } else
96801+#endif
96802+
96803+ if (end > TASK_SIZE)
96804+ return error;
96805+
96806 error = 0;
96807 if (end == start)
96808 return error;
96809diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96810index 44c6bd2..60369dc3 100644
96811--- a/mm/memory-failure.c
96812+++ b/mm/memory-failure.c
96813@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96814
96815 int sysctl_memory_failure_recovery __read_mostly = 1;
96816
96817-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96818+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96819
96820 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96821
96822@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96823 pfn, t->comm, t->pid);
96824 si.si_signo = SIGBUS;
96825 si.si_errno = 0;
96826- si.si_addr = (void *)addr;
96827+ si.si_addr = (void __user *)addr;
96828 #ifdef __ARCH_SI_TRAPNO
96829 si.si_trapno = trapno;
96830 #endif
96831@@ -791,7 +791,7 @@ static struct page_state {
96832 unsigned long res;
96833 char *msg;
96834 int (*action)(struct page *p, unsigned long pfn);
96835-} error_states[] = {
96836+} __do_const error_states[] = {
96837 { reserved, reserved, "reserved kernel", me_kernel },
96838 /*
96839 * free pages are specially detected outside this table:
96840@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96841 nr_pages = 1 << compound_order(hpage);
96842 else /* normal page or thp */
96843 nr_pages = 1;
96844- atomic_long_add(nr_pages, &num_poisoned_pages);
96845+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96846
96847 /*
96848 * We need/can do nothing about count=0 pages.
96849@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96850 if (PageHWPoison(hpage)) {
96851 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96852 || (p != hpage && TestSetPageHWPoison(hpage))) {
96853- atomic_long_sub(nr_pages, &num_poisoned_pages);
96854+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96855 unlock_page(hpage);
96856 return 0;
96857 }
96858@@ -1196,14 +1196,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96859 */
96860 if (!PageHWPoison(p)) {
96861 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96862- atomic_long_sub(nr_pages, &num_poisoned_pages);
96863+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96864 put_page(hpage);
96865 res = 0;
96866 goto out;
96867 }
96868 if (hwpoison_filter(p)) {
96869 if (TestClearPageHWPoison(p))
96870- atomic_long_sub(nr_pages, &num_poisoned_pages);
96871+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96872 unlock_page(hpage);
96873 put_page(hpage);
96874 return 0;
96875@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn)
96876 return 0;
96877 }
96878 if (TestClearPageHWPoison(p))
96879- atomic_long_dec(&num_poisoned_pages);
96880+ atomic_long_dec_unchecked(&num_poisoned_pages);
96881 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96882 return 0;
96883 }
96884@@ -1447,7 +1447,7 @@ int unpoison_memory(unsigned long pfn)
96885 */
96886 if (TestClearPageHWPoison(page)) {
96887 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96888- atomic_long_sub(nr_pages, &num_poisoned_pages);
96889+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96890 freeit = 1;
96891 if (PageHuge(page))
96892 clear_page_hwpoison_huge_page(page);
96893@@ -1572,11 +1572,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96894 if (PageHuge(page)) {
96895 set_page_hwpoison_huge_page(hpage);
96896 dequeue_hwpoisoned_huge_page(hpage);
96897- atomic_long_add(1 << compound_order(hpage),
96898+ atomic_long_add_unchecked(1 << compound_order(hpage),
96899 &num_poisoned_pages);
96900 } else {
96901 SetPageHWPoison(page);
96902- atomic_long_inc(&num_poisoned_pages);
96903+ atomic_long_inc_unchecked(&num_poisoned_pages);
96904 }
96905 }
96906 return ret;
96907@@ -1615,7 +1615,7 @@ static int __soft_offline_page(struct page *page, int flags)
96908 put_page(page);
96909 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96910 SetPageHWPoison(page);
96911- atomic_long_inc(&num_poisoned_pages);
96912+ atomic_long_inc_unchecked(&num_poisoned_pages);
96913 return 0;
96914 }
96915
96916@@ -1666,7 +1666,7 @@ static int __soft_offline_page(struct page *page, int flags)
96917 if (!is_free_buddy_page(page))
96918 pr_info("soft offline: %#lx: page leaked\n",
96919 pfn);
96920- atomic_long_inc(&num_poisoned_pages);
96921+ atomic_long_inc_unchecked(&num_poisoned_pages);
96922 }
96923 } else {
96924 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96925@@ -1736,11 +1736,11 @@ int soft_offline_page(struct page *page, int flags)
96926 if (PageHuge(page)) {
96927 set_page_hwpoison_huge_page(hpage);
96928 dequeue_hwpoisoned_huge_page(hpage);
96929- atomic_long_add(1 << compound_order(hpage),
96930+ atomic_long_add_unchecked(1 << compound_order(hpage),
96931 &num_poisoned_pages);
96932 } else {
96933 SetPageHWPoison(page);
96934- atomic_long_inc(&num_poisoned_pages);
96935+ atomic_long_inc_unchecked(&num_poisoned_pages);
96936 }
96937 }
96938 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
96939diff --git a/mm/memory.c b/mm/memory.c
96940index e229970..68218aa 100644
96941--- a/mm/memory.c
96942+++ b/mm/memory.c
96943@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96944 free_pte_range(tlb, pmd, addr);
96945 } while (pmd++, addr = next, addr != end);
96946
96947+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96948 start &= PUD_MASK;
96949 if (start < floor)
96950 return;
96951@@ -429,6 +430,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96952 pmd = pmd_offset(pud, start);
96953 pud_clear(pud);
96954 pmd_free_tlb(tlb, pmd, start);
96955+#endif
96956+
96957 }
96958
96959 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96960@@ -448,6 +451,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96961 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96962 } while (pud++, addr = next, addr != end);
96963
96964+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96965 start &= PGDIR_MASK;
96966 if (start < floor)
96967 return;
96968@@ -462,6 +466,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96969 pud = pud_offset(pgd, start);
96970 pgd_clear(pgd);
96971 pud_free_tlb(tlb, pud, start);
96972+#endif
96973+
96974 }
96975
96976 /*
96977@@ -691,10 +697,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96978 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96979 */
96980 if (vma->vm_ops)
96981- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96982+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96983 vma->vm_ops->fault);
96984 if (vma->vm_file)
96985- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96986+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96987 vma->vm_file->f_op->mmap);
96988 dump_stack();
96989 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96990@@ -1147,6 +1153,7 @@ again:
96991 print_bad_pte(vma, addr, ptent, page);
96992 if (unlikely(!__tlb_remove_page(tlb, page))) {
96993 force_flush = 1;
96994+ addr += PAGE_SIZE;
96995 break;
96996 }
96997 continue;
96998@@ -1500,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96999 page_add_file_rmap(page);
97000 set_pte_at(mm, addr, pte, mk_pte(page, prot));
97001
97002+#ifdef CONFIG_PAX_SEGMEXEC
97003+ pax_mirror_file_pte(vma, addr, page, ptl);
97004+#endif
97005+
97006 retval = 0;
97007 pte_unmap_unlock(pte, ptl);
97008 return retval;
97009@@ -1544,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
97010 if (!page_count(page))
97011 return -EINVAL;
97012 if (!(vma->vm_flags & VM_MIXEDMAP)) {
97013+
97014+#ifdef CONFIG_PAX_SEGMEXEC
97015+ struct vm_area_struct *vma_m;
97016+#endif
97017+
97018 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
97019 BUG_ON(vma->vm_flags & VM_PFNMAP);
97020 vma->vm_flags |= VM_MIXEDMAP;
97021+
97022+#ifdef CONFIG_PAX_SEGMEXEC
97023+ vma_m = pax_find_mirror_vma(vma);
97024+ if (vma_m)
97025+ vma_m->vm_flags |= VM_MIXEDMAP;
97026+#endif
97027+
97028 }
97029 return insert_page(vma, addr, page, vma->vm_page_prot);
97030 }
97031@@ -1629,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
97032 unsigned long pfn)
97033 {
97034 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
97035+ BUG_ON(vma->vm_mirror);
97036
97037 if (addr < vma->vm_start || addr >= vma->vm_end)
97038 return -EFAULT;
97039@@ -1876,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
97040
97041 BUG_ON(pud_huge(*pud));
97042
97043- pmd = pmd_alloc(mm, pud, addr);
97044+ pmd = (mm == &init_mm) ?
97045+ pmd_alloc_kernel(mm, pud, addr) :
97046+ pmd_alloc(mm, pud, addr);
97047 if (!pmd)
97048 return -ENOMEM;
97049 do {
97050@@ -1896,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
97051 unsigned long next;
97052 int err;
97053
97054- pud = pud_alloc(mm, pgd, addr);
97055+ pud = (mm == &init_mm) ?
97056+ pud_alloc_kernel(mm, pgd, addr) :
97057+ pud_alloc(mm, pgd, addr);
97058 if (!pud)
97059 return -ENOMEM;
97060 do {
97061@@ -2018,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
97062 return ret;
97063 }
97064
97065+#ifdef CONFIG_PAX_SEGMEXEC
97066+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
97067+{
97068+ struct mm_struct *mm = vma->vm_mm;
97069+ spinlock_t *ptl;
97070+ pte_t *pte, entry;
97071+
97072+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
97073+ entry = *pte;
97074+ if (!pte_present(entry)) {
97075+ if (!pte_none(entry)) {
97076+ BUG_ON(pte_file(entry));
97077+ free_swap_and_cache(pte_to_swp_entry(entry));
97078+ pte_clear_not_present_full(mm, address, pte, 0);
97079+ }
97080+ } else {
97081+ struct page *page;
97082+
97083+ flush_cache_page(vma, address, pte_pfn(entry));
97084+ entry = ptep_clear_flush(vma, address, pte);
97085+ BUG_ON(pte_dirty(entry));
97086+ page = vm_normal_page(vma, address, entry);
97087+ if (page) {
97088+ update_hiwater_rss(mm);
97089+ if (PageAnon(page))
97090+ dec_mm_counter_fast(mm, MM_ANONPAGES);
97091+ else
97092+ dec_mm_counter_fast(mm, MM_FILEPAGES);
97093+ page_remove_rmap(page);
97094+ page_cache_release(page);
97095+ }
97096+ }
97097+ pte_unmap_unlock(pte, ptl);
97098+}
97099+
97100+/* PaX: if vma is mirrored, synchronize the mirror's PTE
97101+ *
97102+ * the ptl of the lower mapped page is held on entry and is not released on exit
97103+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
97104+ */
97105+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97106+{
97107+ struct mm_struct *mm = vma->vm_mm;
97108+ unsigned long address_m;
97109+ spinlock_t *ptl_m;
97110+ struct vm_area_struct *vma_m;
97111+ pmd_t *pmd_m;
97112+ pte_t *pte_m, entry_m;
97113+
97114+ BUG_ON(!page_m || !PageAnon(page_m));
97115+
97116+ vma_m = pax_find_mirror_vma(vma);
97117+ if (!vma_m)
97118+ return;
97119+
97120+ BUG_ON(!PageLocked(page_m));
97121+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97122+ address_m = address + SEGMEXEC_TASK_SIZE;
97123+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97124+ pte_m = pte_offset_map(pmd_m, address_m);
97125+ ptl_m = pte_lockptr(mm, pmd_m);
97126+ if (ptl != ptl_m) {
97127+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97128+ if (!pte_none(*pte_m))
97129+ goto out;
97130+ }
97131+
97132+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
97133+ page_cache_get(page_m);
97134+ page_add_anon_rmap(page_m, vma_m, address_m);
97135+ inc_mm_counter_fast(mm, MM_ANONPAGES);
97136+ set_pte_at(mm, address_m, pte_m, entry_m);
97137+ update_mmu_cache(vma_m, address_m, pte_m);
97138+out:
97139+ if (ptl != ptl_m)
97140+ spin_unlock(ptl_m);
97141+ pte_unmap(pte_m);
97142+ unlock_page(page_m);
97143+}
97144+
97145+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
97146+{
97147+ struct mm_struct *mm = vma->vm_mm;
97148+ unsigned long address_m;
97149+ spinlock_t *ptl_m;
97150+ struct vm_area_struct *vma_m;
97151+ pmd_t *pmd_m;
97152+ pte_t *pte_m, entry_m;
97153+
97154+ BUG_ON(!page_m || PageAnon(page_m));
97155+
97156+ vma_m = pax_find_mirror_vma(vma);
97157+ if (!vma_m)
97158+ return;
97159+
97160+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97161+ address_m = address + SEGMEXEC_TASK_SIZE;
97162+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97163+ pte_m = pte_offset_map(pmd_m, address_m);
97164+ ptl_m = pte_lockptr(mm, pmd_m);
97165+ if (ptl != ptl_m) {
97166+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97167+ if (!pte_none(*pte_m))
97168+ goto out;
97169+ }
97170+
97171+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
97172+ page_cache_get(page_m);
97173+ page_add_file_rmap(page_m);
97174+ inc_mm_counter_fast(mm, MM_FILEPAGES);
97175+ set_pte_at(mm, address_m, pte_m, entry_m);
97176+ update_mmu_cache(vma_m, address_m, pte_m);
97177+out:
97178+ if (ptl != ptl_m)
97179+ spin_unlock(ptl_m);
97180+ pte_unmap(pte_m);
97181+}
97182+
97183+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
97184+{
97185+ struct mm_struct *mm = vma->vm_mm;
97186+ unsigned long address_m;
97187+ spinlock_t *ptl_m;
97188+ struct vm_area_struct *vma_m;
97189+ pmd_t *pmd_m;
97190+ pte_t *pte_m, entry_m;
97191+
97192+ vma_m = pax_find_mirror_vma(vma);
97193+ if (!vma_m)
97194+ return;
97195+
97196+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
97197+ address_m = address + SEGMEXEC_TASK_SIZE;
97198+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
97199+ pte_m = pte_offset_map(pmd_m, address_m);
97200+ ptl_m = pte_lockptr(mm, pmd_m);
97201+ if (ptl != ptl_m) {
97202+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
97203+ if (!pte_none(*pte_m))
97204+ goto out;
97205+ }
97206+
97207+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
97208+ set_pte_at(mm, address_m, pte_m, entry_m);
97209+out:
97210+ if (ptl != ptl_m)
97211+ spin_unlock(ptl_m);
97212+ pte_unmap(pte_m);
97213+}
97214+
97215+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
97216+{
97217+ struct page *page_m;
97218+ pte_t entry;
97219+
97220+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
97221+ goto out;
97222+
97223+ entry = *pte;
97224+ page_m = vm_normal_page(vma, address, entry);
97225+ if (!page_m)
97226+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
97227+ else if (PageAnon(page_m)) {
97228+ if (pax_find_mirror_vma(vma)) {
97229+ pte_unmap_unlock(pte, ptl);
97230+ lock_page(page_m);
97231+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
97232+ if (pte_same(entry, *pte))
97233+ pax_mirror_anon_pte(vma, address, page_m, ptl);
97234+ else
97235+ unlock_page(page_m);
97236+ }
97237+ } else
97238+ pax_mirror_file_pte(vma, address, page_m, ptl);
97239+
97240+out:
97241+ pte_unmap_unlock(pte, ptl);
97242+}
97243+#endif
97244+
97245 /*
97246 * This routine handles present pages, when users try to write
97247 * to a shared page. It is done by copying the page to a new address
97248@@ -2216,6 +2424,12 @@ gotten:
97249 */
97250 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97251 if (likely(pte_same(*page_table, orig_pte))) {
97252+
97253+#ifdef CONFIG_PAX_SEGMEXEC
97254+ if (pax_find_mirror_vma(vma))
97255+ BUG_ON(!trylock_page(new_page));
97256+#endif
97257+
97258 if (old_page) {
97259 if (!PageAnon(old_page)) {
97260 dec_mm_counter_fast(mm, MM_FILEPAGES);
97261@@ -2269,6 +2483,10 @@ gotten:
97262 page_remove_rmap(old_page);
97263 }
97264
97265+#ifdef CONFIG_PAX_SEGMEXEC
97266+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97267+#endif
97268+
97269 /* Free the old page.. */
97270 new_page = old_page;
97271 ret |= VM_FAULT_WRITE;
97272@@ -2543,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97273 swap_free(entry);
97274 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
97275 try_to_free_swap(page);
97276+
97277+#ifdef CONFIG_PAX_SEGMEXEC
97278+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
97279+#endif
97280+
97281 unlock_page(page);
97282 if (page != swapcache) {
97283 /*
97284@@ -2566,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
97285
97286 /* No need to invalidate - it was non-present before */
97287 update_mmu_cache(vma, address, page_table);
97288+
97289+#ifdef CONFIG_PAX_SEGMEXEC
97290+ pax_mirror_anon_pte(vma, address, page, ptl);
97291+#endif
97292+
97293 unlock:
97294 pte_unmap_unlock(page_table, ptl);
97295 out:
97296@@ -2585,40 +2813,6 @@ out_release:
97297 }
97298
97299 /*
97300- * This is like a special single-page "expand_{down|up}wards()",
97301- * except we must first make sure that 'address{-|+}PAGE_SIZE'
97302- * doesn't hit another vma.
97303- */
97304-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
97305-{
97306- address &= PAGE_MASK;
97307- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
97308- struct vm_area_struct *prev = vma->vm_prev;
97309-
97310- /*
97311- * Is there a mapping abutting this one below?
97312- *
97313- * That's only ok if it's the same stack mapping
97314- * that has gotten split..
97315- */
97316- if (prev && prev->vm_end == address)
97317- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
97318-
97319- expand_downwards(vma, address - PAGE_SIZE);
97320- }
97321- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
97322- struct vm_area_struct *next = vma->vm_next;
97323-
97324- /* As VM_GROWSDOWN but s/below/above/ */
97325- if (next && next->vm_start == address + PAGE_SIZE)
97326- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
97327-
97328- expand_upwards(vma, address + PAGE_SIZE);
97329- }
97330- return 0;
97331-}
97332-
97333-/*
97334 * We enter with non-exclusive mmap_sem (to exclude vma changes,
97335 * but allow concurrent faults), and pte mapped but not yet locked.
97336 * We return with mmap_sem still held, but pte unmapped and unlocked.
97337@@ -2628,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97338 unsigned int flags)
97339 {
97340 struct mem_cgroup *memcg;
97341- struct page *page;
97342+ struct page *page = NULL;
97343 spinlock_t *ptl;
97344 pte_t entry;
97345
97346- pte_unmap(page_table);
97347-
97348- /* Check if we need to add a guard page to the stack */
97349- if (check_stack_guard_page(vma, address) < 0)
97350- return VM_FAULT_SIGBUS;
97351-
97352- /* Use the zero-page for reads */
97353 if (!(flags & FAULT_FLAG_WRITE)) {
97354 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
97355 vma->vm_page_prot));
97356- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
97357+ ptl = pte_lockptr(mm, pmd);
97358+ spin_lock(ptl);
97359 if (!pte_none(*page_table))
97360 goto unlock;
97361 goto setpte;
97362 }
97363
97364 /* Allocate our own private page. */
97365+ pte_unmap(page_table);
97366+
97367 if (unlikely(anon_vma_prepare(vma)))
97368 goto oom;
97369 page = alloc_zeroed_user_highpage_movable(vma, address);
97370@@ -2672,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
97371 if (!pte_none(*page_table))
97372 goto release;
97373
97374+#ifdef CONFIG_PAX_SEGMEXEC
97375+ if (pax_find_mirror_vma(vma))
97376+ BUG_ON(!trylock_page(page));
97377+#endif
97378+
97379 inc_mm_counter_fast(mm, MM_ANONPAGES);
97380 page_add_new_anon_rmap(page, vma, address);
97381 mem_cgroup_commit_charge(page, memcg, false);
97382@@ -2681,6 +2876,12 @@ setpte:
97383
97384 /* No need to invalidate - it was non-present before */
97385 update_mmu_cache(vma, address, page_table);
97386+
97387+#ifdef CONFIG_PAX_SEGMEXEC
97388+ if (page)
97389+ pax_mirror_anon_pte(vma, address, page, ptl);
97390+#endif
97391+
97392 unlock:
97393 pte_unmap_unlock(page_table, ptl);
97394 return 0;
97395@@ -2911,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97396 return ret;
97397 }
97398 do_set_pte(vma, address, fault_page, pte, false, false);
97399+
97400+#ifdef CONFIG_PAX_SEGMEXEC
97401+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97402+#endif
97403+
97404 unlock_page(fault_page);
97405 unlock_out:
97406 pte_unmap_unlock(pte, ptl);
97407@@ -2953,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97408 page_cache_release(fault_page);
97409 goto uncharge_out;
97410 }
97411+
97412+#ifdef CONFIG_PAX_SEGMEXEC
97413+ if (pax_find_mirror_vma(vma))
97414+ BUG_ON(!trylock_page(new_page));
97415+#endif
97416+
97417 do_set_pte(vma, address, new_page, pte, true, true);
97418+
97419+#ifdef CONFIG_PAX_SEGMEXEC
97420+ pax_mirror_anon_pte(vma, address, new_page, ptl);
97421+#endif
97422+
97423 mem_cgroup_commit_charge(new_page, memcg, false);
97424 lru_cache_add_active_or_unevictable(new_page, vma);
97425 pte_unmap_unlock(pte, ptl);
97426@@ -3003,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97427 return ret;
97428 }
97429 do_set_pte(vma, address, fault_page, pte, true, false);
97430+
97431+#ifdef CONFIG_PAX_SEGMEXEC
97432+ pax_mirror_file_pte(vma, address, fault_page, ptl);
97433+#endif
97434+
97435 pte_unmap_unlock(pte, ptl);
97436
97437 if (set_page_dirty(fault_page))
97438@@ -3244,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
97439 if (flags & FAULT_FLAG_WRITE)
97440 flush_tlb_fix_spurious_fault(vma, address);
97441 }
97442+
97443+#ifdef CONFIG_PAX_SEGMEXEC
97444+ pax_mirror_pte(vma, address, pte, pmd, ptl);
97445+ return 0;
97446+#endif
97447+
97448 unlock:
97449 pte_unmap_unlock(pte, ptl);
97450 return 0;
97451@@ -3263,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
97452 pmd_t *pmd;
97453 pte_t *pte;
97454
97455+#ifdef CONFIG_PAX_SEGMEXEC
97456+ struct vm_area_struct *vma_m;
97457+#endif
97458+
97459 if (unlikely(is_vm_hugetlb_page(vma)))
97460 return hugetlb_fault(mm, vma, address, flags);
97461
97462+#ifdef CONFIG_PAX_SEGMEXEC
97463+ vma_m = pax_find_mirror_vma(vma);
97464+ if (vma_m) {
97465+ unsigned long address_m;
97466+ pgd_t *pgd_m;
97467+ pud_t *pud_m;
97468+ pmd_t *pmd_m;
97469+
97470+ if (vma->vm_start > vma_m->vm_start) {
97471+ address_m = address;
97472+ address -= SEGMEXEC_TASK_SIZE;
97473+ vma = vma_m;
97474+ } else
97475+ address_m = address + SEGMEXEC_TASK_SIZE;
97476+
97477+ pgd_m = pgd_offset(mm, address_m);
97478+ pud_m = pud_alloc(mm, pgd_m, address_m);
97479+ if (!pud_m)
97480+ return VM_FAULT_OOM;
97481+ pmd_m = pmd_alloc(mm, pud_m, address_m);
97482+ if (!pmd_m)
97483+ return VM_FAULT_OOM;
97484+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
97485+ return VM_FAULT_OOM;
97486+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
97487+ }
97488+#endif
97489+
97490 pgd = pgd_offset(mm, address);
97491 pud = pud_alloc(mm, pgd, address);
97492 if (!pud)
97493@@ -3399,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97494 spin_unlock(&mm->page_table_lock);
97495 return 0;
97496 }
97497+
97498+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
97499+{
97500+ pud_t *new = pud_alloc_one(mm, address);
97501+ if (!new)
97502+ return -ENOMEM;
97503+
97504+ smp_wmb(); /* See comment in __pte_alloc */
97505+
97506+ spin_lock(&mm->page_table_lock);
97507+ if (pgd_present(*pgd)) /* Another has populated it */
97508+ pud_free(mm, new);
97509+ else
97510+ pgd_populate_kernel(mm, pgd, new);
97511+ spin_unlock(&mm->page_table_lock);
97512+ return 0;
97513+}
97514 #endif /* __PAGETABLE_PUD_FOLDED */
97515
97516 #ifndef __PAGETABLE_PMD_FOLDED
97517@@ -3429,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
97518 spin_unlock(&mm->page_table_lock);
97519 return 0;
97520 }
97521+
97522+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
97523+{
97524+ pmd_t *new = pmd_alloc_one(mm, address);
97525+ if (!new)
97526+ return -ENOMEM;
97527+
97528+ smp_wmb(); /* See comment in __pte_alloc */
97529+
97530+ spin_lock(&mm->page_table_lock);
97531+#ifndef __ARCH_HAS_4LEVEL_HACK
97532+ if (pud_present(*pud)) /* Another has populated it */
97533+ pmd_free(mm, new);
97534+ else
97535+ pud_populate_kernel(mm, pud, new);
97536+#else
97537+ if (pgd_present(*pud)) /* Another has populated it */
97538+ pmd_free(mm, new);
97539+ else
97540+ pgd_populate_kernel(mm, pud, new);
97541+#endif /* __ARCH_HAS_4LEVEL_HACK */
97542+ spin_unlock(&mm->page_table_lock);
97543+ return 0;
97544+}
97545 #endif /* __PAGETABLE_PMD_FOLDED */
97546
97547 static int __follow_pte(struct mm_struct *mm, unsigned long address,
97548@@ -3538,8 +3839,8 @@ out:
97549 return ret;
97550 }
97551
97552-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97553- void *buf, int len, int write)
97554+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
97555+ void *buf, size_t len, int write)
97556 {
97557 resource_size_t phys_addr;
97558 unsigned long prot = 0;
97559@@ -3565,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
97560 * Access another process' address space as given in mm. If non-NULL, use the
97561 * given task for page fault accounting.
97562 */
97563-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97564- unsigned long addr, void *buf, int len, int write)
97565+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97566+ unsigned long addr, void *buf, size_t len, int write)
97567 {
97568 struct vm_area_struct *vma;
97569 void *old_buf = buf;
97570@@ -3574,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97571 down_read(&mm->mmap_sem);
97572 /* ignore errors, just check how much was successfully transferred */
97573 while (len) {
97574- int bytes, ret, offset;
97575+ ssize_t bytes, ret, offset;
97576 void *maddr;
97577 struct page *page = NULL;
97578
97579@@ -3635,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
97580 *
97581 * The caller must hold a reference on @mm.
97582 */
97583-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97584- void *buf, int len, int write)
97585+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
97586+ void *buf, size_t len, int write)
97587 {
97588 return __access_remote_vm(NULL, mm, addr, buf, len, write);
97589 }
97590@@ -3646,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
97591 * Source/target buffer must be kernel space,
97592 * Do not walk the page table directly, use get_user_pages
97593 */
97594-int access_process_vm(struct task_struct *tsk, unsigned long addr,
97595- void *buf, int len, int write)
97596+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
97597+ void *buf, size_t len, int write)
97598 {
97599 struct mm_struct *mm;
97600- int ret;
97601+ ssize_t ret;
97602
97603 mm = get_task_mm(tsk);
97604 if (!mm)
97605diff --git a/mm/mempolicy.c b/mm/mempolicy.c
97606index 8f5330d..b41914b 100644
97607--- a/mm/mempolicy.c
97608+++ b/mm/mempolicy.c
97609@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97610 unsigned long vmstart;
97611 unsigned long vmend;
97612
97613+#ifdef CONFIG_PAX_SEGMEXEC
97614+ struct vm_area_struct *vma_m;
97615+#endif
97616+
97617 vma = find_vma(mm, start);
97618 if (!vma || vma->vm_start > start)
97619 return -EFAULT;
97620@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
97621 err = vma_replace_policy(vma, new_pol);
97622 if (err)
97623 goto out;
97624+
97625+#ifdef CONFIG_PAX_SEGMEXEC
97626+ vma_m = pax_find_mirror_vma(vma);
97627+ if (vma_m) {
97628+ err = vma_replace_policy(vma_m, new_pol);
97629+ if (err)
97630+ goto out;
97631+ }
97632+#endif
97633+
97634 }
97635
97636 out:
97637@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
97638
97639 if (end < start)
97640 return -EINVAL;
97641+
97642+#ifdef CONFIG_PAX_SEGMEXEC
97643+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97644+ if (end > SEGMEXEC_TASK_SIZE)
97645+ return -EINVAL;
97646+ } else
97647+#endif
97648+
97649+ if (end > TASK_SIZE)
97650+ return -EINVAL;
97651+
97652 if (end == start)
97653 return 0;
97654
97655@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97656 */
97657 tcred = __task_cred(task);
97658 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97659- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97660- !capable(CAP_SYS_NICE)) {
97661+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97662 rcu_read_unlock();
97663 err = -EPERM;
97664 goto out_put;
97665@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97666 goto out;
97667 }
97668
97669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97670+ if (mm != current->mm &&
97671+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
97672+ mmput(mm);
97673+ err = -EPERM;
97674+ goto out;
97675+ }
97676+#endif
97677+
97678 err = do_migrate_pages(mm, old, new,
97679 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
97680
97681diff --git a/mm/migrate.c b/mm/migrate.c
97682index 0143995..b294728 100644
97683--- a/mm/migrate.c
97684+++ b/mm/migrate.c
97685@@ -1495,8 +1495,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
97686 */
97687 tcred = __task_cred(task);
97688 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97689- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97690- !capable(CAP_SYS_NICE)) {
97691+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97692 rcu_read_unlock();
97693 err = -EPERM;
97694 goto out;
97695diff --git a/mm/mlock.c b/mm/mlock.c
97696index ce84cb0..6d5a9aa 100644
97697--- a/mm/mlock.c
97698+++ b/mm/mlock.c
97699@@ -14,6 +14,7 @@
97700 #include <linux/pagevec.h>
97701 #include <linux/mempolicy.h>
97702 #include <linux/syscalls.h>
97703+#include <linux/security.h>
97704 #include <linux/sched.h>
97705 #include <linux/export.h>
97706 #include <linux/rmap.h>
97707@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
97708 {
97709 unsigned long nstart, end, tmp;
97710 struct vm_area_struct * vma, * prev;
97711- int error;
97712+ int error = 0;
97713
97714 VM_BUG_ON(start & ~PAGE_MASK);
97715 VM_BUG_ON(len != PAGE_ALIGN(len));
97716@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
97717 return -EINVAL;
97718 if (end == start)
97719 return 0;
97720+ if (end > TASK_SIZE)
97721+ return -EINVAL;
97722+
97723 vma = find_vma(current->mm, start);
97724 if (!vma || vma->vm_start > start)
97725 return -ENOMEM;
97726@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
97727 for (nstart = start ; ; ) {
97728 vm_flags_t newflags;
97729
97730+#ifdef CONFIG_PAX_SEGMEXEC
97731+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97732+ break;
97733+#endif
97734+
97735 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
97736
97737 newflags = vma->vm_flags & ~VM_LOCKED;
97738@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
97739 locked += current->mm->locked_vm;
97740
97741 /* check against resource limits */
97742+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
97743 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
97744 error = do_mlock(start, len, 1);
97745
97746@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
97747 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
97748 vm_flags_t newflags;
97749
97750+#ifdef CONFIG_PAX_SEGMEXEC
97751+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97752+ break;
97753+#endif
97754+
97755 newflags = vma->vm_flags & ~VM_LOCKED;
97756 if (flags & MCL_CURRENT)
97757 newflags |= VM_LOCKED;
97758@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
97759 lock_limit >>= PAGE_SHIFT;
97760
97761 ret = -ENOMEM;
97762+
97763+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
97764+
97765 down_write(&current->mm->mmap_sem);
97766-
97767 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
97768 capable(CAP_IPC_LOCK))
97769 ret = do_mlockall(flags);
97770diff --git a/mm/mmap.c b/mm/mmap.c
97771index c0a3637..c760814 100644
97772--- a/mm/mmap.c
97773+++ b/mm/mmap.c
97774@@ -41,6 +41,7 @@
97775 #include <linux/notifier.h>
97776 #include <linux/memory.h>
97777 #include <linux/printk.h>
97778+#include <linux/random.h>
97779
97780 #include <asm/uaccess.h>
97781 #include <asm/cacheflush.h>
97782@@ -57,6 +58,16 @@
97783 #define arch_rebalance_pgtables(addr, len) (addr)
97784 #endif
97785
97786+static inline void verify_mm_writelocked(struct mm_struct *mm)
97787+{
97788+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
97789+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97790+ up_read(&mm->mmap_sem);
97791+ BUG();
97792+ }
97793+#endif
97794+}
97795+
97796 static void unmap_region(struct mm_struct *mm,
97797 struct vm_area_struct *vma, struct vm_area_struct *prev,
97798 unsigned long start, unsigned long end);
97799@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
97800 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
97801 *
97802 */
97803-pgprot_t protection_map[16] = {
97804+pgprot_t protection_map[16] __read_only = {
97805 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
97806 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
97807 };
97808
97809-pgprot_t vm_get_page_prot(unsigned long vm_flags)
97810+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
97811 {
97812- return __pgprot(pgprot_val(protection_map[vm_flags &
97813+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
97814 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
97815 pgprot_val(arch_vm_get_page_prot(vm_flags)));
97816+
97817+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97818+ if (!(__supported_pte_mask & _PAGE_NX) &&
97819+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97820+ (vm_flags & (VM_READ | VM_WRITE)))
97821+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97822+#endif
97823+
97824+ return prot;
97825 }
97826 EXPORT_SYMBOL(vm_get_page_prot);
97827
97828@@ -95,6 +115,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97829 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97830 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97831 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97832+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97833 /*
97834 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97835 * other variables. It can be updated by several CPUs frequently.
97836@@ -255,6 +276,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97837 struct vm_area_struct *next = vma->vm_next;
97838
97839 might_sleep();
97840+ BUG_ON(vma->vm_mirror);
97841 if (vma->vm_ops && vma->vm_ops->close)
97842 vma->vm_ops->close(vma);
97843 if (vma->vm_file)
97844@@ -299,6 +321,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97845 * not page aligned -Ram Gupta
97846 */
97847 rlim = rlimit(RLIMIT_DATA);
97848+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97849+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97850+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97851+ rlim = 4096 * PAGE_SIZE;
97852+#endif
97853+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
97854 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
97855 (mm->end_data - mm->start_data) > rlim)
97856 goto out;
97857@@ -949,6 +977,12 @@ static int
97858 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97859 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97860 {
97861+
97862+#ifdef CONFIG_PAX_SEGMEXEC
97863+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97864+ return 0;
97865+#endif
97866+
97867 if (is_mergeable_vma(vma, file, vm_flags) &&
97868 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97869 if (vma->vm_pgoff == vm_pgoff)
97870@@ -968,6 +1002,12 @@ static int
97871 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97872 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97873 {
97874+
97875+#ifdef CONFIG_PAX_SEGMEXEC
97876+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97877+ return 0;
97878+#endif
97879+
97880 if (is_mergeable_vma(vma, file, vm_flags) &&
97881 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97882 pgoff_t vm_pglen;
97883@@ -1010,13 +1050,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97884 struct vm_area_struct *vma_merge(struct mm_struct *mm,
97885 struct vm_area_struct *prev, unsigned long addr,
97886 unsigned long end, unsigned long vm_flags,
97887- struct anon_vma *anon_vma, struct file *file,
97888+ struct anon_vma *anon_vma, struct file *file,
97889 pgoff_t pgoff, struct mempolicy *policy)
97890 {
97891 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
97892 struct vm_area_struct *area, *next;
97893 int err;
97894
97895+#ifdef CONFIG_PAX_SEGMEXEC
97896+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97897+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97898+
97899+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97900+#endif
97901+
97902 /*
97903 * We later require that vma->vm_flags == vm_flags,
97904 * so this tests vma->vm_flags & VM_SPECIAL, too.
97905@@ -1032,6 +1079,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97906 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97907 next = next->vm_next;
97908
97909+#ifdef CONFIG_PAX_SEGMEXEC
97910+ if (prev)
97911+ prev_m = pax_find_mirror_vma(prev);
97912+ if (area)
97913+ area_m = pax_find_mirror_vma(area);
97914+ if (next)
97915+ next_m = pax_find_mirror_vma(next);
97916+#endif
97917+
97918 /*
97919 * Can it merge with the predecessor?
97920 */
97921@@ -1051,9 +1107,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97922 /* cases 1, 6 */
97923 err = vma_adjust(prev, prev->vm_start,
97924 next->vm_end, prev->vm_pgoff, NULL);
97925- } else /* cases 2, 5, 7 */
97926+
97927+#ifdef CONFIG_PAX_SEGMEXEC
97928+ if (!err && prev_m)
97929+ err = vma_adjust(prev_m, prev_m->vm_start,
97930+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97931+#endif
97932+
97933+ } else { /* cases 2, 5, 7 */
97934 err = vma_adjust(prev, prev->vm_start,
97935 end, prev->vm_pgoff, NULL);
97936+
97937+#ifdef CONFIG_PAX_SEGMEXEC
97938+ if (!err && prev_m)
97939+ err = vma_adjust(prev_m, prev_m->vm_start,
97940+ end_m, prev_m->vm_pgoff, NULL);
97941+#endif
97942+
97943+ }
97944 if (err)
97945 return NULL;
97946 khugepaged_enter_vma_merge(prev);
97947@@ -1067,12 +1138,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97948 mpol_equal(policy, vma_policy(next)) &&
97949 can_vma_merge_before(next, vm_flags,
97950 anon_vma, file, pgoff+pglen)) {
97951- if (prev && addr < prev->vm_end) /* case 4 */
97952+ if (prev && addr < prev->vm_end) { /* case 4 */
97953 err = vma_adjust(prev, prev->vm_start,
97954 addr, prev->vm_pgoff, NULL);
97955- else /* cases 3, 8 */
97956+
97957+#ifdef CONFIG_PAX_SEGMEXEC
97958+ if (!err && prev_m)
97959+ err = vma_adjust(prev_m, prev_m->vm_start,
97960+ addr_m, prev_m->vm_pgoff, NULL);
97961+#endif
97962+
97963+ } else { /* cases 3, 8 */
97964 err = vma_adjust(area, addr, next->vm_end,
97965 next->vm_pgoff - pglen, NULL);
97966+
97967+#ifdef CONFIG_PAX_SEGMEXEC
97968+ if (!err && area_m)
97969+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97970+ next_m->vm_pgoff - pglen, NULL);
97971+#endif
97972+
97973+ }
97974 if (err)
97975 return NULL;
97976 khugepaged_enter_vma_merge(area);
97977@@ -1181,8 +1267,10 @@ none:
97978 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97979 struct file *file, long pages)
97980 {
97981- const unsigned long stack_flags
97982- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97983+
97984+#ifdef CONFIG_PAX_RANDMMAP
97985+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97986+#endif
97987
97988 mm->total_vm += pages;
97989
97990@@ -1190,7 +1278,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97991 mm->shared_vm += pages;
97992 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97993 mm->exec_vm += pages;
97994- } else if (flags & stack_flags)
97995+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97996 mm->stack_vm += pages;
97997 }
97998 #endif /* CONFIG_PROC_FS */
97999@@ -1220,6 +1308,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
98000 locked += mm->locked_vm;
98001 lock_limit = rlimit(RLIMIT_MEMLOCK);
98002 lock_limit >>= PAGE_SHIFT;
98003+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98004 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
98005 return -EAGAIN;
98006 }
98007@@ -1246,7 +1335,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98008 * (the exception is when the underlying filesystem is noexec
98009 * mounted, in which case we dont add PROT_EXEC.)
98010 */
98011- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98012+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98013 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
98014 prot |= PROT_EXEC;
98015
98016@@ -1272,7 +1361,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98017 /* Obtain the address to map to. we verify (or select) it and ensure
98018 * that it represents a valid section of the address space.
98019 */
98020- addr = get_unmapped_area(file, addr, len, pgoff, flags);
98021+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
98022 if (addr & ~PAGE_MASK)
98023 return addr;
98024
98025@@ -1283,6 +1372,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98026 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
98027 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
98028
98029+#ifdef CONFIG_PAX_MPROTECT
98030+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98031+
98032+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
98033+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
98034+ mm->binfmt->handle_mmap)
98035+ mm->binfmt->handle_mmap(file);
98036+#endif
98037+
98038+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98039+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
98040+ gr_log_rwxmmap(file);
98041+
98042+#ifdef CONFIG_PAX_EMUPLT
98043+ vm_flags &= ~VM_EXEC;
98044+#else
98045+ return -EPERM;
98046+#endif
98047+
98048+ }
98049+
98050+ if (!(vm_flags & VM_EXEC))
98051+ vm_flags &= ~VM_MAYEXEC;
98052+#else
98053+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98054+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98055+#endif
98056+ else
98057+ vm_flags &= ~VM_MAYWRITE;
98058+ }
98059+#endif
98060+
98061+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98062+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
98063+ vm_flags &= ~VM_PAGEEXEC;
98064+#endif
98065+
98066 if (flags & MAP_LOCKED)
98067 if (!can_do_mlock())
98068 return -EPERM;
98069@@ -1370,6 +1496,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
98070 vm_flags |= VM_NORESERVE;
98071 }
98072
98073+ if (!gr_acl_handle_mmap(file, prot))
98074+ return -EACCES;
98075+
98076 addr = mmap_region(file, addr, len, vm_flags, pgoff);
98077 if (!IS_ERR_VALUE(addr) &&
98078 ((vm_flags & VM_LOCKED) ||
98079@@ -1463,7 +1592,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
98080 vm_flags_t vm_flags = vma->vm_flags;
98081
98082 /* If it was private or non-writable, the write bit is already clear */
98083- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
98084+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
98085 return 0;
98086
98087 /* The backer wishes to know when pages are first written to? */
98088@@ -1509,7 +1638,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98089 struct rb_node **rb_link, *rb_parent;
98090 unsigned long charged = 0;
98091
98092+#ifdef CONFIG_PAX_SEGMEXEC
98093+ struct vm_area_struct *vma_m = NULL;
98094+#endif
98095+
98096+ /*
98097+ * mm->mmap_sem is required to protect against another thread
98098+ * changing the mappings in case we sleep.
98099+ */
98100+ verify_mm_writelocked(mm);
98101+
98102 /* Check against address space limit. */
98103+
98104+#ifdef CONFIG_PAX_RANDMMAP
98105+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
98106+#endif
98107+
98108 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
98109 unsigned long nr_pages;
98110
98111@@ -1528,11 +1672,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
98112
98113 /* Clear old maps */
98114 error = -ENOMEM;
98115-munmap_back:
98116 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98117 if (do_munmap(mm, addr, len))
98118 return -ENOMEM;
98119- goto munmap_back;
98120+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98121 }
98122
98123 /*
98124@@ -1563,6 +1706,16 @@ munmap_back:
98125 goto unacct_error;
98126 }
98127
98128+#ifdef CONFIG_PAX_SEGMEXEC
98129+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
98130+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98131+ if (!vma_m) {
98132+ error = -ENOMEM;
98133+ goto free_vma;
98134+ }
98135+ }
98136+#endif
98137+
98138 vma->vm_mm = mm;
98139 vma->vm_start = addr;
98140 vma->vm_end = addr + len;
98141@@ -1593,6 +1746,13 @@ munmap_back:
98142 if (error)
98143 goto unmap_and_free_vma;
98144
98145+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
98146+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
98147+ vma->vm_flags |= VM_PAGEEXEC;
98148+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98149+ }
98150+#endif
98151+
98152 /* Can addr have changed??
98153 *
98154 * Answer: Yes, several device drivers can do it in their
98155@@ -1626,6 +1786,12 @@ munmap_back:
98156 }
98157
98158 vma_link(mm, vma, prev, rb_link, rb_parent);
98159+
98160+#ifdef CONFIG_PAX_SEGMEXEC
98161+ if (vma_m)
98162+ BUG_ON(pax_mirror_vma(vma_m, vma));
98163+#endif
98164+
98165 /* Once vma denies write, undo our temporary denial count */
98166 if (file) {
98167 if (vm_flags & VM_SHARED)
98168@@ -1638,6 +1804,7 @@ out:
98169 perf_event_mmap(vma);
98170
98171 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
98172+ track_exec_limit(mm, addr, addr + len, vm_flags);
98173 if (vm_flags & VM_LOCKED) {
98174 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
98175 vma == get_gate_vma(current->mm)))
98176@@ -1673,6 +1840,12 @@ allow_write_and_free_vma:
98177 if (vm_flags & VM_DENYWRITE)
98178 allow_write_access(file);
98179 free_vma:
98180+
98181+#ifdef CONFIG_PAX_SEGMEXEC
98182+ if (vma_m)
98183+ kmem_cache_free(vm_area_cachep, vma_m);
98184+#endif
98185+
98186 kmem_cache_free(vm_area_cachep, vma);
98187 unacct_error:
98188 if (charged)
98189@@ -1680,7 +1853,63 @@ unacct_error:
98190 return error;
98191 }
98192
98193-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98194+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
98195+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
98196+{
98197+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
98198+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
98199+
98200+ return 0;
98201+}
98202+#endif
98203+
98204+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
98205+{
98206+ if (!vma) {
98207+#ifdef CONFIG_STACK_GROWSUP
98208+ if (addr > sysctl_heap_stack_gap)
98209+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
98210+ else
98211+ vma = find_vma(current->mm, 0);
98212+ if (vma && (vma->vm_flags & VM_GROWSUP))
98213+ return false;
98214+#endif
98215+ return true;
98216+ }
98217+
98218+ if (addr + len > vma->vm_start)
98219+ return false;
98220+
98221+ if (vma->vm_flags & VM_GROWSDOWN)
98222+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
98223+#ifdef CONFIG_STACK_GROWSUP
98224+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
98225+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
98226+#endif
98227+ else if (offset)
98228+ return offset <= vma->vm_start - addr - len;
98229+
98230+ return true;
98231+}
98232+
98233+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
98234+{
98235+ if (vma->vm_start < len)
98236+ return -ENOMEM;
98237+
98238+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
98239+ if (offset <= vma->vm_start - len)
98240+ return vma->vm_start - len - offset;
98241+ else
98242+ return -ENOMEM;
98243+ }
98244+
98245+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
98246+ return vma->vm_start - len - sysctl_heap_stack_gap;
98247+ return -ENOMEM;
98248+}
98249+
98250+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
98251 {
98252 /*
98253 * We implement the search by looking for an rbtree node that
98254@@ -1728,11 +1957,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
98255 }
98256 }
98257
98258- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
98259+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
98260 check_current:
98261 /* Check if current node has a suitable gap */
98262 if (gap_start > high_limit)
98263 return -ENOMEM;
98264+
98265+ if (gap_end - gap_start > info->threadstack_offset)
98266+ gap_start += info->threadstack_offset;
98267+ else
98268+ gap_start = gap_end;
98269+
98270+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98271+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98272+ gap_start += sysctl_heap_stack_gap;
98273+ else
98274+ gap_start = gap_end;
98275+ }
98276+ if (vma->vm_flags & VM_GROWSDOWN) {
98277+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98278+ gap_end -= sysctl_heap_stack_gap;
98279+ else
98280+ gap_end = gap_start;
98281+ }
98282 if (gap_end >= low_limit && gap_end - gap_start >= length)
98283 goto found;
98284
98285@@ -1782,7 +2029,7 @@ found:
98286 return gap_start;
98287 }
98288
98289-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
98290+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
98291 {
98292 struct mm_struct *mm = current->mm;
98293 struct vm_area_struct *vma;
98294@@ -1836,6 +2083,24 @@ check_current:
98295 gap_end = vma->vm_start;
98296 if (gap_end < low_limit)
98297 return -ENOMEM;
98298+
98299+ if (gap_end - gap_start > info->threadstack_offset)
98300+ gap_end -= info->threadstack_offset;
98301+ else
98302+ gap_end = gap_start;
98303+
98304+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
98305+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98306+ gap_start += sysctl_heap_stack_gap;
98307+ else
98308+ gap_start = gap_end;
98309+ }
98310+ if (vma->vm_flags & VM_GROWSDOWN) {
98311+ if (gap_end - gap_start > sysctl_heap_stack_gap)
98312+ gap_end -= sysctl_heap_stack_gap;
98313+ else
98314+ gap_end = gap_start;
98315+ }
98316 if (gap_start <= high_limit && gap_end - gap_start >= length)
98317 goto found;
98318
98319@@ -1899,6 +2164,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98320 struct mm_struct *mm = current->mm;
98321 struct vm_area_struct *vma;
98322 struct vm_unmapped_area_info info;
98323+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98324
98325 if (len > TASK_SIZE - mmap_min_addr)
98326 return -ENOMEM;
98327@@ -1906,11 +2172,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98328 if (flags & MAP_FIXED)
98329 return addr;
98330
98331+#ifdef CONFIG_PAX_RANDMMAP
98332+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98333+#endif
98334+
98335 if (addr) {
98336 addr = PAGE_ALIGN(addr);
98337 vma = find_vma(mm, addr);
98338 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98339- (!vma || addr + len <= vma->vm_start))
98340+ check_heap_stack_gap(vma, addr, len, offset))
98341 return addr;
98342 }
98343
98344@@ -1919,6 +2189,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
98345 info.low_limit = mm->mmap_base;
98346 info.high_limit = TASK_SIZE;
98347 info.align_mask = 0;
98348+ info.threadstack_offset = offset;
98349 return vm_unmapped_area(&info);
98350 }
98351 #endif
98352@@ -1937,6 +2208,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98353 struct mm_struct *mm = current->mm;
98354 unsigned long addr = addr0;
98355 struct vm_unmapped_area_info info;
98356+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
98357
98358 /* requested length too big for entire address space */
98359 if (len > TASK_SIZE - mmap_min_addr)
98360@@ -1945,12 +2217,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98361 if (flags & MAP_FIXED)
98362 return addr;
98363
98364+#ifdef CONFIG_PAX_RANDMMAP
98365+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
98366+#endif
98367+
98368 /* requesting a specific address */
98369 if (addr) {
98370 addr = PAGE_ALIGN(addr);
98371 vma = find_vma(mm, addr);
98372 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
98373- (!vma || addr + len <= vma->vm_start))
98374+ check_heap_stack_gap(vma, addr, len, offset))
98375 return addr;
98376 }
98377
98378@@ -1959,6 +2235,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98379 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
98380 info.high_limit = mm->mmap_base;
98381 info.align_mask = 0;
98382+ info.threadstack_offset = offset;
98383 addr = vm_unmapped_area(&info);
98384
98385 /*
98386@@ -1971,6 +2248,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
98387 VM_BUG_ON(addr != -ENOMEM);
98388 info.flags = 0;
98389 info.low_limit = TASK_UNMAPPED_BASE;
98390+
98391+#ifdef CONFIG_PAX_RANDMMAP
98392+ if (mm->pax_flags & MF_PAX_RANDMMAP)
98393+ info.low_limit += mm->delta_mmap;
98394+#endif
98395+
98396 info.high_limit = TASK_SIZE;
98397 addr = vm_unmapped_area(&info);
98398 }
98399@@ -2071,6 +2354,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
98400 return vma;
98401 }
98402
98403+#ifdef CONFIG_PAX_SEGMEXEC
98404+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
98405+{
98406+ struct vm_area_struct *vma_m;
98407+
98408+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
98409+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
98410+ BUG_ON(vma->vm_mirror);
98411+ return NULL;
98412+ }
98413+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
98414+ vma_m = vma->vm_mirror;
98415+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
98416+ BUG_ON(vma->vm_file != vma_m->vm_file);
98417+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
98418+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
98419+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
98420+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
98421+ return vma_m;
98422+}
98423+#endif
98424+
98425 /*
98426 * Verify that the stack growth is acceptable and
98427 * update accounting. This is shared with both the
98428@@ -2087,6 +2392,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98429 return -ENOMEM;
98430
98431 /* Stack limit test */
98432+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
98433 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
98434 return -ENOMEM;
98435
98436@@ -2097,6 +2403,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98437 locked = mm->locked_vm + grow;
98438 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
98439 limit >>= PAGE_SHIFT;
98440+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
98441 if (locked > limit && !capable(CAP_IPC_LOCK))
98442 return -ENOMEM;
98443 }
98444@@ -2126,37 +2433,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
98445 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
98446 * vma is the last one with address > vma->vm_end. Have to extend vma.
98447 */
98448+#ifndef CONFIG_IA64
98449+static
98450+#endif
98451 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98452 {
98453 int error;
98454+ bool locknext;
98455
98456 if (!(vma->vm_flags & VM_GROWSUP))
98457 return -EFAULT;
98458
98459+ /* Also guard against wrapping around to address 0. */
98460+ if (address < PAGE_ALIGN(address+1))
98461+ address = PAGE_ALIGN(address+1);
98462+ else
98463+ return -ENOMEM;
98464+
98465 /*
98466 * We must make sure the anon_vma is allocated
98467 * so that the anon_vma locking is not a noop.
98468 */
98469 if (unlikely(anon_vma_prepare(vma)))
98470 return -ENOMEM;
98471+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
98472+ if (locknext && anon_vma_prepare(vma->vm_next))
98473+ return -ENOMEM;
98474 vma_lock_anon_vma(vma);
98475+ if (locknext)
98476+ vma_lock_anon_vma(vma->vm_next);
98477
98478 /*
98479 * vma->vm_start/vm_end cannot change under us because the caller
98480 * is required to hold the mmap_sem in read mode. We need the
98481- * anon_vma lock to serialize against concurrent expand_stacks.
98482- * Also guard against wrapping around to address 0.
98483+ * anon_vma locks to serialize against concurrent expand_stacks
98484+ * and expand_upwards.
98485 */
98486- if (address < PAGE_ALIGN(address+4))
98487- address = PAGE_ALIGN(address+4);
98488- else {
98489- vma_unlock_anon_vma(vma);
98490- return -ENOMEM;
98491- }
98492 error = 0;
98493
98494 /* Somebody else might have raced and expanded it already */
98495- if (address > vma->vm_end) {
98496+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
98497+ error = -ENOMEM;
98498+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
98499 unsigned long size, grow;
98500
98501 size = address - vma->vm_start;
98502@@ -2191,6 +2509,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
98503 }
98504 }
98505 }
98506+ if (locknext)
98507+ vma_unlock_anon_vma(vma->vm_next);
98508 vma_unlock_anon_vma(vma);
98509 khugepaged_enter_vma_merge(vma);
98510 validate_mm(vma->vm_mm);
98511@@ -2205,6 +2525,8 @@ int expand_downwards(struct vm_area_struct *vma,
98512 unsigned long address)
98513 {
98514 int error;
98515+ bool lockprev = false;
98516+ struct vm_area_struct *prev;
98517
98518 /*
98519 * We must make sure the anon_vma is allocated
98520@@ -2218,6 +2540,15 @@ int expand_downwards(struct vm_area_struct *vma,
98521 if (error)
98522 return error;
98523
98524+ prev = vma->vm_prev;
98525+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
98526+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
98527+#endif
98528+ if (lockprev && anon_vma_prepare(prev))
98529+ return -ENOMEM;
98530+ if (lockprev)
98531+ vma_lock_anon_vma(prev);
98532+
98533 vma_lock_anon_vma(vma);
98534
98535 /*
98536@@ -2227,9 +2558,17 @@ int expand_downwards(struct vm_area_struct *vma,
98537 */
98538
98539 /* Somebody else might have raced and expanded it already */
98540- if (address < vma->vm_start) {
98541+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
98542+ error = -ENOMEM;
98543+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
98544 unsigned long size, grow;
98545
98546+#ifdef CONFIG_PAX_SEGMEXEC
98547+ struct vm_area_struct *vma_m;
98548+
98549+ vma_m = pax_find_mirror_vma(vma);
98550+#endif
98551+
98552 size = vma->vm_end - address;
98553 grow = (vma->vm_start - address) >> PAGE_SHIFT;
98554
98555@@ -2254,13 +2593,27 @@ int expand_downwards(struct vm_area_struct *vma,
98556 vma->vm_pgoff -= grow;
98557 anon_vma_interval_tree_post_update_vma(vma);
98558 vma_gap_update(vma);
98559+
98560+#ifdef CONFIG_PAX_SEGMEXEC
98561+ if (vma_m) {
98562+ anon_vma_interval_tree_pre_update_vma(vma_m);
98563+ vma_m->vm_start -= grow << PAGE_SHIFT;
98564+ vma_m->vm_pgoff -= grow;
98565+ anon_vma_interval_tree_post_update_vma(vma_m);
98566+ vma_gap_update(vma_m);
98567+ }
98568+#endif
98569+
98570 spin_unlock(&vma->vm_mm->page_table_lock);
98571
98572+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
98573 perf_event_mmap(vma);
98574 }
98575 }
98576 }
98577 vma_unlock_anon_vma(vma);
98578+ if (lockprev)
98579+ vma_unlock_anon_vma(prev);
98580 khugepaged_enter_vma_merge(vma);
98581 validate_mm(vma->vm_mm);
98582 return error;
98583@@ -2358,6 +2711,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
98584 do {
98585 long nrpages = vma_pages(vma);
98586
98587+#ifdef CONFIG_PAX_SEGMEXEC
98588+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
98589+ vma = remove_vma(vma);
98590+ continue;
98591+ }
98592+#endif
98593+
98594 if (vma->vm_flags & VM_ACCOUNT)
98595 nr_accounted += nrpages;
98596 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
98597@@ -2402,6 +2762,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
98598 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
98599 vma->vm_prev = NULL;
98600 do {
98601+
98602+#ifdef CONFIG_PAX_SEGMEXEC
98603+ if (vma->vm_mirror) {
98604+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
98605+ vma->vm_mirror->vm_mirror = NULL;
98606+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
98607+ vma->vm_mirror = NULL;
98608+ }
98609+#endif
98610+
98611 vma_rb_erase(vma, &mm->mm_rb);
98612 mm->map_count--;
98613 tail_vma = vma;
98614@@ -2429,14 +2799,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98615 struct vm_area_struct *new;
98616 int err = -ENOMEM;
98617
98618+#ifdef CONFIG_PAX_SEGMEXEC
98619+ struct vm_area_struct *vma_m, *new_m = NULL;
98620+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
98621+#endif
98622+
98623 if (is_vm_hugetlb_page(vma) && (addr &
98624 ~(huge_page_mask(hstate_vma(vma)))))
98625 return -EINVAL;
98626
98627+#ifdef CONFIG_PAX_SEGMEXEC
98628+ vma_m = pax_find_mirror_vma(vma);
98629+#endif
98630+
98631 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98632 if (!new)
98633 goto out_err;
98634
98635+#ifdef CONFIG_PAX_SEGMEXEC
98636+ if (vma_m) {
98637+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
98638+ if (!new_m) {
98639+ kmem_cache_free(vm_area_cachep, new);
98640+ goto out_err;
98641+ }
98642+ }
98643+#endif
98644+
98645 /* most fields are the same, copy all, and then fixup */
98646 *new = *vma;
98647
98648@@ -2449,6 +2838,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98649 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
98650 }
98651
98652+#ifdef CONFIG_PAX_SEGMEXEC
98653+ if (vma_m) {
98654+ *new_m = *vma_m;
98655+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
98656+ new_m->vm_mirror = new;
98657+ new->vm_mirror = new_m;
98658+
98659+ if (new_below)
98660+ new_m->vm_end = addr_m;
98661+ else {
98662+ new_m->vm_start = addr_m;
98663+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
98664+ }
98665+ }
98666+#endif
98667+
98668 err = vma_dup_policy(vma, new);
98669 if (err)
98670 goto out_free_vma;
98671@@ -2468,6 +2873,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98672 else
98673 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
98674
98675+#ifdef CONFIG_PAX_SEGMEXEC
98676+ if (!err && vma_m) {
98677+ struct mempolicy *pol = vma_policy(new);
98678+
98679+ if (anon_vma_clone(new_m, vma_m))
98680+ goto out_free_mpol;
98681+
98682+ mpol_get(pol);
98683+ set_vma_policy(new_m, pol);
98684+
98685+ if (new_m->vm_file)
98686+ get_file(new_m->vm_file);
98687+
98688+ if (new_m->vm_ops && new_m->vm_ops->open)
98689+ new_m->vm_ops->open(new_m);
98690+
98691+ if (new_below)
98692+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
98693+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
98694+ else
98695+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
98696+
98697+ if (err) {
98698+ if (new_m->vm_ops && new_m->vm_ops->close)
98699+ new_m->vm_ops->close(new_m);
98700+ if (new_m->vm_file)
98701+ fput(new_m->vm_file);
98702+ mpol_put(pol);
98703+ }
98704+ }
98705+#endif
98706+
98707 /* Success. */
98708 if (!err)
98709 return 0;
98710@@ -2477,10 +2914,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98711 new->vm_ops->close(new);
98712 if (new->vm_file)
98713 fput(new->vm_file);
98714- unlink_anon_vmas(new);
98715 out_free_mpol:
98716 mpol_put(vma_policy(new));
98717 out_free_vma:
98718+
98719+#ifdef CONFIG_PAX_SEGMEXEC
98720+ if (new_m) {
98721+ unlink_anon_vmas(new_m);
98722+ kmem_cache_free(vm_area_cachep, new_m);
98723+ }
98724+#endif
98725+
98726+ unlink_anon_vmas(new);
98727 kmem_cache_free(vm_area_cachep, new);
98728 out_err:
98729 return err;
98730@@ -2493,6 +2938,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
98731 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98732 unsigned long addr, int new_below)
98733 {
98734+
98735+#ifdef CONFIG_PAX_SEGMEXEC
98736+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98737+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
98738+ if (mm->map_count >= sysctl_max_map_count-1)
98739+ return -ENOMEM;
98740+ } else
98741+#endif
98742+
98743 if (mm->map_count >= sysctl_max_map_count)
98744 return -ENOMEM;
98745
98746@@ -2504,11 +2958,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98747 * work. This now handles partial unmappings.
98748 * Jeremy Fitzhardinge <jeremy@goop.org>
98749 */
98750+#ifdef CONFIG_PAX_SEGMEXEC
98751 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98752 {
98753+ int ret = __do_munmap(mm, start, len);
98754+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
98755+ return ret;
98756+
98757+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
98758+}
98759+
98760+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98761+#else
98762+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98763+#endif
98764+{
98765 unsigned long end;
98766 struct vm_area_struct *vma, *prev, *last;
98767
98768+ /*
98769+ * mm->mmap_sem is required to protect against another thread
98770+ * changing the mappings in case we sleep.
98771+ */
98772+ verify_mm_writelocked(mm);
98773+
98774 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
98775 return -EINVAL;
98776
98777@@ -2583,6 +3056,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98778 /* Fix up all other VM information */
98779 remove_vma_list(mm, vma);
98780
98781+ track_exec_limit(mm, start, end, 0UL);
98782+
98783 return 0;
98784 }
98785
98786@@ -2591,6 +3066,13 @@ int vm_munmap(unsigned long start, size_t len)
98787 int ret;
98788 struct mm_struct *mm = current->mm;
98789
98790+
98791+#ifdef CONFIG_PAX_SEGMEXEC
98792+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98793+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
98794+ return -EINVAL;
98795+#endif
98796+
98797 down_write(&mm->mmap_sem);
98798 ret = do_munmap(mm, start, len);
98799 up_write(&mm->mmap_sem);
98800@@ -2604,16 +3086,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
98801 return vm_munmap(addr, len);
98802 }
98803
98804-static inline void verify_mm_writelocked(struct mm_struct *mm)
98805-{
98806-#ifdef CONFIG_DEBUG_VM
98807- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98808- WARN_ON(1);
98809- up_read(&mm->mmap_sem);
98810- }
98811-#endif
98812-}
98813-
98814 /*
98815 * this is really a simplified "do_mmap". it only handles
98816 * anonymous maps. eventually we may be able to do some
98817@@ -2627,6 +3099,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98818 struct rb_node ** rb_link, * rb_parent;
98819 pgoff_t pgoff = addr >> PAGE_SHIFT;
98820 int error;
98821+ unsigned long charged;
98822
98823 len = PAGE_ALIGN(len);
98824 if (!len)
98825@@ -2634,10 +3107,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98826
98827 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98828
98829+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98830+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98831+ flags &= ~VM_EXEC;
98832+
98833+#ifdef CONFIG_PAX_MPROTECT
98834+ if (mm->pax_flags & MF_PAX_MPROTECT)
98835+ flags &= ~VM_MAYEXEC;
98836+#endif
98837+
98838+ }
98839+#endif
98840+
98841 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98842 if (error & ~PAGE_MASK)
98843 return error;
98844
98845+ charged = len >> PAGE_SHIFT;
98846+
98847 error = mlock_future_check(mm, mm->def_flags, len);
98848 if (error)
98849 return error;
98850@@ -2651,21 +3138,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98851 /*
98852 * Clear old maps. this also does some error checking for us
98853 */
98854- munmap_back:
98855 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98856 if (do_munmap(mm, addr, len))
98857 return -ENOMEM;
98858- goto munmap_back;
98859+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98860 }
98861
98862 /* Check against address space limits *after* clearing old maps... */
98863- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98864+ if (!may_expand_vm(mm, charged))
98865 return -ENOMEM;
98866
98867 if (mm->map_count > sysctl_max_map_count)
98868 return -ENOMEM;
98869
98870- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98871+ if (security_vm_enough_memory_mm(mm, charged))
98872 return -ENOMEM;
98873
98874 /* Can we just expand an old private anonymous mapping? */
98875@@ -2679,7 +3165,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98876 */
98877 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98878 if (!vma) {
98879- vm_unacct_memory(len >> PAGE_SHIFT);
98880+ vm_unacct_memory(charged);
98881 return -ENOMEM;
98882 }
98883
98884@@ -2693,10 +3179,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98885 vma_link(mm, vma, prev, rb_link, rb_parent);
98886 out:
98887 perf_event_mmap(vma);
98888- mm->total_vm += len >> PAGE_SHIFT;
98889+ mm->total_vm += charged;
98890 if (flags & VM_LOCKED)
98891- mm->locked_vm += (len >> PAGE_SHIFT);
98892+ mm->locked_vm += charged;
98893 vma->vm_flags |= VM_SOFTDIRTY;
98894+ track_exec_limit(mm, addr, addr + len, flags);
98895 return addr;
98896 }
98897
98898@@ -2758,6 +3245,7 @@ void exit_mmap(struct mm_struct *mm)
98899 while (vma) {
98900 if (vma->vm_flags & VM_ACCOUNT)
98901 nr_accounted += vma_pages(vma);
98902+ vma->vm_mirror = NULL;
98903 vma = remove_vma(vma);
98904 }
98905 vm_unacct_memory(nr_accounted);
98906@@ -2775,6 +3263,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98907 struct vm_area_struct *prev;
98908 struct rb_node **rb_link, *rb_parent;
98909
98910+#ifdef CONFIG_PAX_SEGMEXEC
98911+ struct vm_area_struct *vma_m = NULL;
98912+#endif
98913+
98914+ if (security_mmap_addr(vma->vm_start))
98915+ return -EPERM;
98916+
98917 /*
98918 * The vm_pgoff of a purely anonymous vma should be irrelevant
98919 * until its first write fault, when page's anon_vma and index
98920@@ -2798,7 +3293,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98921 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98922 return -ENOMEM;
98923
98924+#ifdef CONFIG_PAX_SEGMEXEC
98925+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98926+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98927+ if (!vma_m)
98928+ return -ENOMEM;
98929+ }
98930+#endif
98931+
98932 vma_link(mm, vma, prev, rb_link, rb_parent);
98933+
98934+#ifdef CONFIG_PAX_SEGMEXEC
98935+ if (vma_m)
98936+ BUG_ON(pax_mirror_vma(vma_m, vma));
98937+#endif
98938+
98939 return 0;
98940 }
98941
98942@@ -2817,6 +3326,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98943 struct rb_node **rb_link, *rb_parent;
98944 bool faulted_in_anon_vma = true;
98945
98946+ BUG_ON(vma->vm_mirror);
98947+
98948 /*
98949 * If anonymous vma has not yet been faulted, update new pgoff
98950 * to match new location, to increase its chance of merging.
98951@@ -2881,6 +3392,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98952 return NULL;
98953 }
98954
98955+#ifdef CONFIG_PAX_SEGMEXEC
98956+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98957+{
98958+ struct vm_area_struct *prev_m;
98959+ struct rb_node **rb_link_m, *rb_parent_m;
98960+ struct mempolicy *pol_m;
98961+
98962+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98963+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98964+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98965+ *vma_m = *vma;
98966+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98967+ if (anon_vma_clone(vma_m, vma))
98968+ return -ENOMEM;
98969+ pol_m = vma_policy(vma_m);
98970+ mpol_get(pol_m);
98971+ set_vma_policy(vma_m, pol_m);
98972+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98973+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98974+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98975+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98976+ if (vma_m->vm_file)
98977+ get_file(vma_m->vm_file);
98978+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98979+ vma_m->vm_ops->open(vma_m);
98980+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98981+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98982+ vma_m->vm_mirror = vma;
98983+ vma->vm_mirror = vma_m;
98984+ return 0;
98985+}
98986+#endif
98987+
98988 /*
98989 * Return true if the calling process may expand its vm space by the passed
98990 * number of pages
98991@@ -2892,6 +3436,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98992
98993 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98994
98995+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98996 if (cur + npages > lim)
98997 return 0;
98998 return 1;
98999@@ -2974,6 +3519,22 @@ static struct vm_area_struct *__install_special_mapping(
99000 vma->vm_start = addr;
99001 vma->vm_end = addr + len;
99002
99003+#ifdef CONFIG_PAX_MPROTECT
99004+ if (mm->pax_flags & MF_PAX_MPROTECT) {
99005+#ifndef CONFIG_PAX_MPROTECT_COMPAT
99006+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
99007+ return ERR_PTR(-EPERM);
99008+ if (!(vm_flags & VM_EXEC))
99009+ vm_flags &= ~VM_MAYEXEC;
99010+#else
99011+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
99012+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
99013+#endif
99014+ else
99015+ vm_flags &= ~VM_MAYWRITE;
99016+ }
99017+#endif
99018+
99019 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
99020 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
99021
99022diff --git a/mm/mprotect.c b/mm/mprotect.c
99023index c43d557..0b7ccd2 100644
99024--- a/mm/mprotect.c
99025+++ b/mm/mprotect.c
99026@@ -24,10 +24,18 @@
99027 #include <linux/migrate.h>
99028 #include <linux/perf_event.h>
99029 #include <linux/ksm.h>
99030+#include <linux/sched/sysctl.h>
99031+
99032+#ifdef CONFIG_PAX_MPROTECT
99033+#include <linux/elf.h>
99034+#include <linux/binfmts.h>
99035+#endif
99036+
99037 #include <asm/uaccess.h>
99038 #include <asm/pgtable.h>
99039 #include <asm/cacheflush.h>
99040 #include <asm/tlbflush.h>
99041+#include <asm/mmu_context.h>
99042
99043 #ifndef pgprot_modify
99044 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
99045@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
99046 return pages;
99047 }
99048
99049+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99050+/* called while holding the mmap semaphor for writing except stack expansion */
99051+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
99052+{
99053+ unsigned long oldlimit, newlimit = 0UL;
99054+
99055+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
99056+ return;
99057+
99058+ spin_lock(&mm->page_table_lock);
99059+ oldlimit = mm->context.user_cs_limit;
99060+ if ((prot & VM_EXEC) && oldlimit < end)
99061+ /* USER_CS limit moved up */
99062+ newlimit = end;
99063+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
99064+ /* USER_CS limit moved down */
99065+ newlimit = start;
99066+
99067+ if (newlimit) {
99068+ mm->context.user_cs_limit = newlimit;
99069+
99070+#ifdef CONFIG_SMP
99071+ wmb();
99072+ cpus_clear(mm->context.cpu_user_cs_mask);
99073+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
99074+#endif
99075+
99076+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
99077+ }
99078+ spin_unlock(&mm->page_table_lock);
99079+ if (newlimit == end) {
99080+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
99081+
99082+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
99083+ if (is_vm_hugetlb_page(vma))
99084+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
99085+ else
99086+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
99087+ }
99088+}
99089+#endif
99090+
99091 int
99092 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99093 unsigned long start, unsigned long end, unsigned long newflags)
99094@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99095 int error;
99096 int dirty_accountable = 0;
99097
99098+#ifdef CONFIG_PAX_SEGMEXEC
99099+ struct vm_area_struct *vma_m = NULL;
99100+ unsigned long start_m, end_m;
99101+
99102+ start_m = start + SEGMEXEC_TASK_SIZE;
99103+ end_m = end + SEGMEXEC_TASK_SIZE;
99104+#endif
99105+
99106 if (newflags == oldflags) {
99107 *pprev = vma;
99108 return 0;
99109 }
99110
99111+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
99112+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
99113+
99114+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
99115+ return -ENOMEM;
99116+
99117+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
99118+ return -ENOMEM;
99119+ }
99120+
99121 /*
99122 * If we make a private mapping writable we increase our commit;
99123 * but (without finer accounting) cannot reduce our commit if we
99124@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
99125 }
99126 }
99127
99128+#ifdef CONFIG_PAX_SEGMEXEC
99129+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
99130+ if (start != vma->vm_start) {
99131+ error = split_vma(mm, vma, start, 1);
99132+ if (error)
99133+ goto fail;
99134+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
99135+ *pprev = (*pprev)->vm_next;
99136+ }
99137+
99138+ if (end != vma->vm_end) {
99139+ error = split_vma(mm, vma, end, 0);
99140+ if (error)
99141+ goto fail;
99142+ }
99143+
99144+ if (pax_find_mirror_vma(vma)) {
99145+ error = __do_munmap(mm, start_m, end_m - start_m);
99146+ if (error)
99147+ goto fail;
99148+ } else {
99149+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
99150+ if (!vma_m) {
99151+ error = -ENOMEM;
99152+ goto fail;
99153+ }
99154+ vma->vm_flags = newflags;
99155+ error = pax_mirror_vma(vma_m, vma);
99156+ if (error) {
99157+ vma->vm_flags = oldflags;
99158+ goto fail;
99159+ }
99160+ }
99161+ }
99162+#endif
99163+
99164 /*
99165 * First try to merge with previous and/or next vma.
99166 */
99167@@ -319,9 +423,21 @@ success:
99168 * vm_flags and vm_page_prot are protected by the mmap_sem
99169 * held in write mode.
99170 */
99171+
99172+#ifdef CONFIG_PAX_SEGMEXEC
99173+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
99174+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
99175+#endif
99176+
99177 vma->vm_flags = newflags;
99178+
99179+#ifdef CONFIG_PAX_MPROTECT
99180+ if (mm->binfmt && mm->binfmt->handle_mprotect)
99181+ mm->binfmt->handle_mprotect(vma, newflags);
99182+#endif
99183+
99184 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
99185- vm_get_page_prot(newflags));
99186+ vm_get_page_prot(vma->vm_flags));
99187
99188 if (vma_wants_writenotify(vma)) {
99189 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
99190@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99191 end = start + len;
99192 if (end <= start)
99193 return -ENOMEM;
99194+
99195+#ifdef CONFIG_PAX_SEGMEXEC
99196+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
99197+ if (end > SEGMEXEC_TASK_SIZE)
99198+ return -EINVAL;
99199+ } else
99200+#endif
99201+
99202+ if (end > TASK_SIZE)
99203+ return -EINVAL;
99204+
99205 if (!arch_validate_prot(prot))
99206 return -EINVAL;
99207
99208@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99209 /*
99210 * Does the application expect PROT_READ to imply PROT_EXEC:
99211 */
99212- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
99213+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
99214 prot |= PROT_EXEC;
99215
99216 vm_flags = calc_vm_prot_bits(prot);
99217@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99218 if (start > vma->vm_start)
99219 prev = vma;
99220
99221+#ifdef CONFIG_PAX_MPROTECT
99222+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
99223+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
99224+#endif
99225+
99226 for (nstart = start ; ; ) {
99227 unsigned long newflags;
99228
99229@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99230
99231 /* newflags >> 4 shift VM_MAY% in place of VM_% */
99232 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
99233+ if (prot & (PROT_WRITE | PROT_EXEC))
99234+ gr_log_rwxmprotect(vma);
99235+
99236+ error = -EACCES;
99237+ goto out;
99238+ }
99239+
99240+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
99241 error = -EACCES;
99242 goto out;
99243 }
99244@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
99245 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
99246 if (error)
99247 goto out;
99248+
99249+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
99250+
99251 nstart = tmp;
99252
99253 if (nstart < prev->vm_end)
99254diff --git a/mm/mremap.c b/mm/mremap.c
99255index 05f1180..c3cde48 100644
99256--- a/mm/mremap.c
99257+++ b/mm/mremap.c
99258@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
99259 continue;
99260 pte = ptep_get_and_clear(mm, old_addr, old_pte);
99261 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
99262+
99263+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
99264+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
99265+ pte = pte_exprotect(pte);
99266+#endif
99267+
99268 pte = move_soft_dirty_pte(pte);
99269 set_pte_at(mm, new_addr, new_pte, pte);
99270 }
99271@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
99272 if (is_vm_hugetlb_page(vma))
99273 goto Einval;
99274
99275+#ifdef CONFIG_PAX_SEGMEXEC
99276+ if (pax_find_mirror_vma(vma))
99277+ goto Einval;
99278+#endif
99279+
99280 /* We can't remap across vm area boundaries */
99281 if (old_len > vma->vm_end - addr)
99282 goto Efault;
99283@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
99284 unsigned long ret = -EINVAL;
99285 unsigned long charged = 0;
99286 unsigned long map_flags;
99287+ unsigned long pax_task_size = TASK_SIZE;
99288
99289 if (new_addr & ~PAGE_MASK)
99290 goto out;
99291
99292- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
99293+#ifdef CONFIG_PAX_SEGMEXEC
99294+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99295+ pax_task_size = SEGMEXEC_TASK_SIZE;
99296+#endif
99297+
99298+ pax_task_size -= PAGE_SIZE;
99299+
99300+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
99301 goto out;
99302
99303 /* Check if the location we're moving into overlaps the
99304 * old location at all, and fail if it does.
99305 */
99306- if ((new_addr <= addr) && (new_addr+new_len) > addr)
99307- goto out;
99308-
99309- if ((addr <= new_addr) && (addr+old_len) > new_addr)
99310+ if (addr + old_len > new_addr && new_addr + new_len > addr)
99311 goto out;
99312
99313 ret = do_munmap(mm, new_addr, new_len);
99314@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99315 unsigned long ret = -EINVAL;
99316 unsigned long charged = 0;
99317 bool locked = false;
99318+ unsigned long pax_task_size = TASK_SIZE;
99319
99320 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
99321 return ret;
99322@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99323 if (!new_len)
99324 return ret;
99325
99326+#ifdef CONFIG_PAX_SEGMEXEC
99327+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
99328+ pax_task_size = SEGMEXEC_TASK_SIZE;
99329+#endif
99330+
99331+ pax_task_size -= PAGE_SIZE;
99332+
99333+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
99334+ old_len > pax_task_size || addr > pax_task_size-old_len)
99335+ return ret;
99336+
99337 down_write(&current->mm->mmap_sem);
99338
99339 if (flags & MREMAP_FIXED) {
99340@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99341 new_addr = addr;
99342 }
99343 ret = addr;
99344+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
99345 goto out;
99346 }
99347 }
99348@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
99349 goto out;
99350 }
99351
99352+ map_flags = vma->vm_flags;
99353 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
99354+ if (!(ret & ~PAGE_MASK)) {
99355+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
99356+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
99357+ }
99358 }
99359 out:
99360 if (ret & ~PAGE_MASK)
99361diff --git a/mm/nommu.c b/mm/nommu.c
99362index a881d96..e5932cd 100644
99363--- a/mm/nommu.c
99364+++ b/mm/nommu.c
99365@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
99366 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
99367 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
99368 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
99369-int heap_stack_gap = 0;
99370
99371 atomic_long_t mmap_pages_allocated;
99372
99373@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
99374 EXPORT_SYMBOL(find_vma);
99375
99376 /*
99377- * find a VMA
99378- * - we don't extend stack VMAs under NOMMU conditions
99379- */
99380-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
99381-{
99382- return find_vma(mm, addr);
99383-}
99384-
99385-/*
99386 * expand a stack to a given address
99387 * - not supported under NOMMU conditions
99388 */
99389@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
99390
99391 /* most fields are the same, copy all, and then fixup */
99392 *new = *vma;
99393+ INIT_LIST_HEAD(&new->anon_vma_chain);
99394 *region = *vma->vm_region;
99395 new->vm_region = region;
99396
99397@@ -2002,8 +1993,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
99398 }
99399 EXPORT_SYMBOL(generic_file_remap_pages);
99400
99401-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99402- unsigned long addr, void *buf, int len, int write)
99403+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99404+ unsigned long addr, void *buf, size_t len, int write)
99405 {
99406 struct vm_area_struct *vma;
99407
99408@@ -2044,8 +2035,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
99409 *
99410 * The caller must hold a reference on @mm.
99411 */
99412-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99413- void *buf, int len, int write)
99414+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
99415+ void *buf, size_t len, int write)
99416 {
99417 return __access_remote_vm(NULL, mm, addr, buf, len, write);
99418 }
99419@@ -2054,7 +2045,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
99420 * Access another process' address space.
99421 * - source/target buffer must be kernel space
99422 */
99423-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
99424+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
99425 {
99426 struct mm_struct *mm;
99427
99428diff --git a/mm/page-writeback.c b/mm/page-writeback.c
99429index 91d73ef..0e564d2 100644
99430--- a/mm/page-writeback.c
99431+++ b/mm/page-writeback.c
99432@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
99433 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
99434 * - the bdi dirty thresh drops quickly due to change of JBOD workload
99435 */
99436-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
99437+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
99438 unsigned long thresh,
99439 unsigned long bg_thresh,
99440 unsigned long dirty,
99441diff --git a/mm/page_alloc.c b/mm/page_alloc.c
99442index eee9619..155d328 100644
99443--- a/mm/page_alloc.c
99444+++ b/mm/page_alloc.c
99445@@ -61,6 +61,7 @@
99446 #include <linux/page-debug-flags.h>
99447 #include <linux/hugetlb.h>
99448 #include <linux/sched/rt.h>
99449+#include <linux/random.h>
99450
99451 #include <asm/sections.h>
99452 #include <asm/tlbflush.h>
99453@@ -357,7 +358,7 @@ out:
99454 * This usage means that zero-order pages may not be compound.
99455 */
99456
99457-static void free_compound_page(struct page *page)
99458+void free_compound_page(struct page *page)
99459 {
99460 __free_pages_ok(page, compound_order(page));
99461 }
99462@@ -751,6 +752,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99463 int i;
99464 int bad = 0;
99465
99466+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99467+ unsigned long index = 1UL << order;
99468+#endif
99469+
99470 trace_mm_page_free(page, order);
99471 kmemcheck_free_shadow(page, order);
99472
99473@@ -767,6 +772,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
99474 debug_check_no_obj_freed(page_address(page),
99475 PAGE_SIZE << order);
99476 }
99477+
99478+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99479+ for (; index; --index)
99480+ sanitize_highpage(page + index - 1);
99481+#endif
99482+
99483 arch_free_page(page, order);
99484 kernel_map_pages(page, 1 << order, 0);
99485
99486@@ -790,6 +801,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
99487 local_irq_restore(flags);
99488 }
99489
99490+#ifdef CONFIG_PAX_LATENT_ENTROPY
99491+bool __meminitdata extra_latent_entropy;
99492+
99493+static int __init setup_pax_extra_latent_entropy(char *str)
99494+{
99495+ extra_latent_entropy = true;
99496+ return 0;
99497+}
99498+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
99499+
99500+volatile u64 latent_entropy __latent_entropy;
99501+EXPORT_SYMBOL(latent_entropy);
99502+#endif
99503+
99504 void __init __free_pages_bootmem(struct page *page, unsigned int order)
99505 {
99506 unsigned int nr_pages = 1 << order;
99507@@ -805,6 +830,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
99508 __ClearPageReserved(p);
99509 set_page_count(p, 0);
99510
99511+#ifdef CONFIG_PAX_LATENT_ENTROPY
99512+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
99513+ u64 hash = 0;
99514+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
99515+ const u64 *data = lowmem_page_address(page);
99516+
99517+ for (index = 0; index < end; index++)
99518+ hash ^= hash + data[index];
99519+ latent_entropy ^= hash;
99520+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
99521+ }
99522+#endif
99523+
99524 page_zone(page)->managed_pages += nr_pages;
99525 set_page_refcounted(page);
99526 __free_pages(page, order);
99527@@ -933,8 +971,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
99528 arch_alloc_page(page, order);
99529 kernel_map_pages(page, 1 << order, 1);
99530
99531+#ifndef CONFIG_PAX_MEMORY_SANITIZE
99532 if (gfp_flags & __GFP_ZERO)
99533 prep_zero_page(page, order, gfp_flags);
99534+#endif
99535
99536 if (order && (gfp_flags & __GFP_COMP))
99537 prep_compound_page(page, order);
99538@@ -1612,7 +1652,7 @@ again:
99539 }
99540
99541 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
99542- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99543+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
99544 !zone_is_fair_depleted(zone))
99545 zone_set_flag(zone, ZONE_FAIR_DEPLETED);
99546
99547@@ -1933,7 +1973,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
99548 do {
99549 mod_zone_page_state(zone, NR_ALLOC_BATCH,
99550 high_wmark_pages(zone) - low_wmark_pages(zone) -
99551- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99552+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99553 zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
99554 } while (zone++ != preferred_zone);
99555 }
99556@@ -5702,7 +5742,7 @@ static void __setup_per_zone_wmarks(void)
99557
99558 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
99559 high_wmark_pages(zone) - low_wmark_pages(zone) -
99560- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
99561+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
99562
99563 setup_zone_migrate_reserve(zone);
99564 spin_unlock_irqrestore(&zone->lock, flags);
99565diff --git a/mm/percpu.c b/mm/percpu.c
99566index da997f9..19040e9 100644
99567--- a/mm/percpu.c
99568+++ b/mm/percpu.c
99569@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
99570 static unsigned int pcpu_high_unit_cpu __read_mostly;
99571
99572 /* the address of the first chunk which starts with the kernel static area */
99573-void *pcpu_base_addr __read_mostly;
99574+void *pcpu_base_addr __read_only;
99575 EXPORT_SYMBOL_GPL(pcpu_base_addr);
99576
99577 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
99578diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
99579index 5077afc..846c9ef 100644
99580--- a/mm/process_vm_access.c
99581+++ b/mm/process_vm_access.c
99582@@ -13,6 +13,7 @@
99583 #include <linux/uio.h>
99584 #include <linux/sched.h>
99585 #include <linux/highmem.h>
99586+#include <linux/security.h>
99587 #include <linux/ptrace.h>
99588 #include <linux/slab.h>
99589 #include <linux/syscalls.h>
99590@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99591 ssize_t iov_len;
99592 size_t total_len = iov_iter_count(iter);
99593
99594+ return -ENOSYS; // PaX: until properly audited
99595+
99596 /*
99597 * Work out how many pages of struct pages we're going to need
99598 * when eventually calling get_user_pages
99599 */
99600 for (i = 0; i < riovcnt; i++) {
99601 iov_len = rvec[i].iov_len;
99602- if (iov_len > 0) {
99603- nr_pages_iov = ((unsigned long)rvec[i].iov_base
99604- + iov_len)
99605- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
99606- / PAGE_SIZE + 1;
99607- nr_pages = max(nr_pages, nr_pages_iov);
99608- }
99609+ if (iov_len <= 0)
99610+ continue;
99611+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
99612+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
99613+ nr_pages = max(nr_pages, nr_pages_iov);
99614 }
99615
99616 if (nr_pages == 0)
99617@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
99618 goto free_proc_pages;
99619 }
99620
99621+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
99622+ rc = -EPERM;
99623+ goto put_task_struct;
99624+ }
99625+
99626 mm = mm_access(task, PTRACE_MODE_ATTACH);
99627 if (!mm || IS_ERR(mm)) {
99628 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
99629diff --git a/mm/rmap.c b/mm/rmap.c
99630index 3e8491c..02abccc 100644
99631--- a/mm/rmap.c
99632+++ b/mm/rmap.c
99633@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99634 struct anon_vma *anon_vma = vma->anon_vma;
99635 struct anon_vma_chain *avc;
99636
99637+#ifdef CONFIG_PAX_SEGMEXEC
99638+ struct anon_vma_chain *avc_m = NULL;
99639+#endif
99640+
99641 might_sleep();
99642 if (unlikely(!anon_vma)) {
99643 struct mm_struct *mm = vma->vm_mm;
99644@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99645 if (!avc)
99646 goto out_enomem;
99647
99648+#ifdef CONFIG_PAX_SEGMEXEC
99649+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
99650+ if (!avc_m)
99651+ goto out_enomem_free_avc;
99652+#endif
99653+
99654 anon_vma = find_mergeable_anon_vma(vma);
99655 allocated = NULL;
99656 if (!anon_vma) {
99657@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99658 /* page_table_lock to protect against threads */
99659 spin_lock(&mm->page_table_lock);
99660 if (likely(!vma->anon_vma)) {
99661+
99662+#ifdef CONFIG_PAX_SEGMEXEC
99663+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
99664+
99665+ if (vma_m) {
99666+ BUG_ON(vma_m->anon_vma);
99667+ vma_m->anon_vma = anon_vma;
99668+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
99669+ avc_m = NULL;
99670+ }
99671+#endif
99672+
99673 vma->anon_vma = anon_vma;
99674 anon_vma_chain_link(vma, avc, anon_vma);
99675 allocated = NULL;
99676@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99677
99678 if (unlikely(allocated))
99679 put_anon_vma(allocated);
99680+
99681+#ifdef CONFIG_PAX_SEGMEXEC
99682+ if (unlikely(avc_m))
99683+ anon_vma_chain_free(avc_m);
99684+#endif
99685+
99686 if (unlikely(avc))
99687 anon_vma_chain_free(avc);
99688 }
99689 return 0;
99690
99691 out_enomem_free_avc:
99692+
99693+#ifdef CONFIG_PAX_SEGMEXEC
99694+ if (avc_m)
99695+ anon_vma_chain_free(avc_m);
99696+#endif
99697+
99698 anon_vma_chain_free(avc);
99699 out_enomem:
99700 return -ENOMEM;
99701@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
99702 * Attach the anon_vmas from src to dst.
99703 * Returns 0 on success, -ENOMEM on failure.
99704 */
99705-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99706+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
99707 {
99708 struct anon_vma_chain *avc, *pavc;
99709 struct anon_vma *root = NULL;
99710@@ -270,7 +304,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99711 * the corresponding VMA in the parent process is attached to.
99712 * Returns 0 on success, non-zero on failure.
99713 */
99714-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
99715+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
99716 {
99717 struct anon_vma_chain *avc;
99718 struct anon_vma *anon_vma;
99719@@ -374,8 +408,10 @@ static void anon_vma_ctor(void *data)
99720 void __init anon_vma_init(void)
99721 {
99722 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
99723- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
99724- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
99725+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
99726+ anon_vma_ctor);
99727+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
99728+ SLAB_PANIC|SLAB_NO_SANITIZE);
99729 }
99730
99731 /*
99732diff --git a/mm/shmem.c b/mm/shmem.c
99733index 469f90d..34a09ee 100644
99734--- a/mm/shmem.c
99735+++ b/mm/shmem.c
99736@@ -33,7 +33,7 @@
99737 #include <linux/swap.h>
99738 #include <linux/aio.h>
99739
99740-static struct vfsmount *shm_mnt;
99741+struct vfsmount *shm_mnt;
99742
99743 #ifdef CONFIG_SHMEM
99744 /*
99745@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
99746 #define BOGO_DIRENT_SIZE 20
99747
99748 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99749-#define SHORT_SYMLINK_LEN 128
99750+#define SHORT_SYMLINK_LEN 64
99751
99752 /*
99753 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99754@@ -2524,6 +2524,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
99755 static int shmem_xattr_validate(const char *name)
99756 {
99757 struct { const char *prefix; size_t len; } arr[] = {
99758+
99759+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99760+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
99761+#endif
99762+
99763 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
99764 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
99765 };
99766@@ -2579,6 +2584,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
99767 if (err)
99768 return err;
99769
99770+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99771+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
99772+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
99773+ return -EOPNOTSUPP;
99774+ if (size > 8)
99775+ return -EINVAL;
99776+ }
99777+#endif
99778+
99779 return simple_xattr_set(&info->xattrs, name, value, size, flags);
99780 }
99781
99782@@ -2962,8 +2976,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
99783 int err = -ENOMEM;
99784
99785 /* Round up to L1_CACHE_BYTES to resist false sharing */
99786- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
99787- L1_CACHE_BYTES), GFP_KERNEL);
99788+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
99789 if (!sbinfo)
99790 return -ENOMEM;
99791
99792diff --git a/mm/slab.c b/mm/slab.c
99793index 7c52b38..3ccc17e 100644
99794--- a/mm/slab.c
99795+++ b/mm/slab.c
99796@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99797 if ((x)->max_freeable < i) \
99798 (x)->max_freeable = i; \
99799 } while (0)
99800-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
99801-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
99802-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
99803-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
99804+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
99805+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
99806+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99807+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99808+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99809+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99810 #else
99811 #define STATS_INC_ACTIVE(x) do { } while (0)
99812 #define STATS_DEC_ACTIVE(x) do { } while (0)
99813@@ -336,6 +338,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99814 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99815 #define STATS_INC_FREEHIT(x) do { } while (0)
99816 #define STATS_INC_FREEMISS(x) do { } while (0)
99817+#define STATS_INC_SANITIZED(x) do { } while (0)
99818+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99819 #endif
99820
99821 #if DEBUG
99822@@ -452,7 +456,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99823 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99824 */
99825 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99826- const struct page *page, void *obj)
99827+ const struct page *page, const void *obj)
99828 {
99829 u32 offset = (obj - page->s_mem);
99830 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99831@@ -1462,12 +1466,12 @@ void __init kmem_cache_init(void)
99832 */
99833
99834 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
99835- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
99836+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99837
99838 if (INDEX_AC != INDEX_NODE)
99839 kmalloc_caches[INDEX_NODE] =
99840 create_kmalloc_cache("kmalloc-node",
99841- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99842+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99843
99844 slab_early_init = 0;
99845
99846@@ -3384,6 +3388,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99847 struct array_cache *ac = cpu_cache_get(cachep);
99848
99849 check_irq_off();
99850+
99851+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99852+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99853+ STATS_INC_NOT_SANITIZED(cachep);
99854+ else {
99855+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99856+
99857+ if (cachep->ctor)
99858+ cachep->ctor(objp);
99859+
99860+ STATS_INC_SANITIZED(cachep);
99861+ }
99862+#endif
99863+
99864 kmemleak_free_recursive(objp, cachep->flags);
99865 objp = cache_free_debugcheck(cachep, objp, caller);
99866
99867@@ -3607,6 +3625,7 @@ void kfree(const void *objp)
99868
99869 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99870 return;
99871+ VM_BUG_ON(!virt_addr_valid(objp));
99872 local_irq_save(flags);
99873 kfree_debugcheck(objp);
99874 c = virt_to_cache(objp);
99875@@ -4056,14 +4075,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99876 }
99877 /* cpu stats */
99878 {
99879- unsigned long allochit = atomic_read(&cachep->allochit);
99880- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99881- unsigned long freehit = atomic_read(&cachep->freehit);
99882- unsigned long freemiss = atomic_read(&cachep->freemiss);
99883+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99884+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99885+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99886+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99887
99888 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99889 allochit, allocmiss, freehit, freemiss);
99890 }
99891+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99892+ {
99893+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99894+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99895+
99896+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99897+ }
99898+#endif
99899 #endif
99900 }
99901
99902@@ -4281,13 +4308,69 @@ static const struct file_operations proc_slabstats_operations = {
99903 static int __init slab_proc_init(void)
99904 {
99905 #ifdef CONFIG_DEBUG_SLAB_LEAK
99906- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99907+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99908 #endif
99909 return 0;
99910 }
99911 module_init(slab_proc_init);
99912 #endif
99913
99914+bool is_usercopy_object(const void *ptr)
99915+{
99916+ struct page *page;
99917+ struct kmem_cache *cachep;
99918+
99919+ if (ZERO_OR_NULL_PTR(ptr))
99920+ return false;
99921+
99922+ if (!slab_is_available())
99923+ return false;
99924+
99925+ if (!virt_addr_valid(ptr))
99926+ return false;
99927+
99928+ page = virt_to_head_page(ptr);
99929+
99930+ if (!PageSlab(page))
99931+ return false;
99932+
99933+ cachep = page->slab_cache;
99934+ return cachep->flags & SLAB_USERCOPY;
99935+}
99936+
99937+#ifdef CONFIG_PAX_USERCOPY
99938+const char *check_heap_object(const void *ptr, unsigned long n)
99939+{
99940+ struct page *page;
99941+ struct kmem_cache *cachep;
99942+ unsigned int objnr;
99943+ unsigned long offset;
99944+
99945+ if (ZERO_OR_NULL_PTR(ptr))
99946+ return "<null>";
99947+
99948+ if (!virt_addr_valid(ptr))
99949+ return NULL;
99950+
99951+ page = virt_to_head_page(ptr);
99952+
99953+ if (!PageSlab(page))
99954+ return NULL;
99955+
99956+ cachep = page->slab_cache;
99957+ if (!(cachep->flags & SLAB_USERCOPY))
99958+ return cachep->name;
99959+
99960+ objnr = obj_to_index(cachep, page, ptr);
99961+ BUG_ON(objnr >= cachep->num);
99962+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99963+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99964+ return NULL;
99965+
99966+ return cachep->name;
99967+}
99968+#endif
99969+
99970 /**
99971 * ksize - get the actual amount of memory allocated for a given object
99972 * @objp: Pointer to the object
99973diff --git a/mm/slab.h b/mm/slab.h
99974index 0e0fdd3..d0fd761 100644
99975--- a/mm/slab.h
99976+++ b/mm/slab.h
99977@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
99978 /* The slab cache that manages slab cache information */
99979 extern struct kmem_cache *kmem_cache;
99980
99981+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99982+#ifdef CONFIG_X86_64
99983+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99984+#else
99985+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99986+#endif
99987+enum pax_sanitize_mode {
99988+ PAX_SANITIZE_SLAB_OFF = 0,
99989+ PAX_SANITIZE_SLAB_FAST,
99990+ PAX_SANITIZE_SLAB_FULL,
99991+};
99992+extern enum pax_sanitize_mode pax_sanitize_slab;
99993+#endif
99994+
99995 unsigned long calculate_alignment(unsigned long flags,
99996 unsigned long align, unsigned long size);
99997
99998@@ -67,7 +81,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99999
100000 /* Legal flag mask for kmem_cache_create(), for various configurations */
100001 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
100002- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
100003+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
100004+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
100005
100006 #if defined(CONFIG_DEBUG_SLAB)
100007 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
100008@@ -251,6 +266,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
100009 return s;
100010
100011 page = virt_to_head_page(x);
100012+
100013+ BUG_ON(!PageSlab(page));
100014+
100015 cachep = page->slab_cache;
100016 if (slab_equal_or_root(cachep, s))
100017 return cachep;
100018diff --git a/mm/slab_common.c b/mm/slab_common.c
100019index d319502..da7714e 100644
100020--- a/mm/slab_common.c
100021+++ b/mm/slab_common.c
100022@@ -25,11 +25,35 @@
100023
100024 #include "slab.h"
100025
100026-enum slab_state slab_state;
100027+enum slab_state slab_state __read_only;
100028 LIST_HEAD(slab_caches);
100029 DEFINE_MUTEX(slab_mutex);
100030 struct kmem_cache *kmem_cache;
100031
100032+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100033+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
100034+static int __init pax_sanitize_slab_setup(char *str)
100035+{
100036+ if (!str)
100037+ return 0;
100038+
100039+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
100040+ pr_info("PaX slab sanitization: %s\n", "disabled");
100041+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
100042+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
100043+ pr_info("PaX slab sanitization: %s\n", "fast");
100044+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
100045+ } else if (!strcmp(str, "full")) {
100046+ pr_info("PaX slab sanitization: %s\n", "full");
100047+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
100048+ } else
100049+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
100050+
100051+ return 0;
100052+}
100053+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
100054+#endif
100055+
100056 #ifdef CONFIG_DEBUG_VM
100057 static int kmem_cache_sanity_check(const char *name, size_t size)
100058 {
100059@@ -160,7 +184,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
100060 if (err)
100061 goto out_free_cache;
100062
100063- s->refcount = 1;
100064+ atomic_set(&s->refcount, 1);
100065 list_add(&s->list, &slab_caches);
100066 out:
100067 if (err)
100068@@ -222,6 +246,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
100069 */
100070 flags &= CACHE_CREATE_MASK;
100071
100072+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100073+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
100074+ flags |= SLAB_NO_SANITIZE;
100075+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
100076+ flags &= ~SLAB_NO_SANITIZE;
100077+#endif
100078+
100079 s = __kmem_cache_alias(name, size, align, flags, ctor);
100080 if (s)
100081 goto out_unlock;
100082@@ -341,8 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
100083
100084 mutex_lock(&slab_mutex);
100085
100086- s->refcount--;
100087- if (s->refcount)
100088+ if (!atomic_dec_and_test(&s->refcount))
100089 goto out_unlock;
100090
100091 if (memcg_cleanup_cache_params(s) != 0)
100092@@ -362,7 +392,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
100093 rcu_barrier();
100094
100095 memcg_free_cache_params(s);
100096-#ifdef SLAB_SUPPORTS_SYSFS
100097+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100098 sysfs_slab_remove(s);
100099 #else
100100 slab_kmem_cache_release(s);
100101@@ -418,7 +448,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
100102 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
100103 name, size, err);
100104
100105- s->refcount = -1; /* Exempt from merging for now */
100106+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
100107 }
100108
100109 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
100110@@ -431,7 +461,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
100111
100112 create_boot_cache(s, name, size, flags);
100113 list_add(&s->list, &slab_caches);
100114- s->refcount = 1;
100115+ atomic_set(&s->refcount, 1);
100116 return s;
100117 }
100118
100119@@ -443,6 +473,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
100120 EXPORT_SYMBOL(kmalloc_dma_caches);
100121 #endif
100122
100123+#ifdef CONFIG_PAX_USERCOPY_SLABS
100124+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
100125+EXPORT_SYMBOL(kmalloc_usercopy_caches);
100126+#endif
100127+
100128 /*
100129 * Conversion table for small slabs sizes / 8 to the index in the
100130 * kmalloc array. This is necessary for slabs < 192 since we have non power
100131@@ -507,6 +542,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
100132 return kmalloc_dma_caches[index];
100133
100134 #endif
100135+
100136+#ifdef CONFIG_PAX_USERCOPY_SLABS
100137+ if (unlikely((flags & GFP_USERCOPY)))
100138+ return kmalloc_usercopy_caches[index];
100139+
100140+#endif
100141+
100142 return kmalloc_caches[index];
100143 }
100144
100145@@ -563,7 +605,7 @@ void __init create_kmalloc_caches(unsigned long flags)
100146 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
100147 if (!kmalloc_caches[i]) {
100148 kmalloc_caches[i] = create_kmalloc_cache(NULL,
100149- 1 << i, flags);
100150+ 1 << i, SLAB_USERCOPY | flags);
100151 }
100152
100153 /*
100154@@ -572,10 +614,10 @@ void __init create_kmalloc_caches(unsigned long flags)
100155 * earlier power of two caches
100156 */
100157 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
100158- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
100159+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
100160
100161 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
100162- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
100163+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
100164 }
100165
100166 /* Kmalloc array is now usable */
100167@@ -608,6 +650,23 @@ void __init create_kmalloc_caches(unsigned long flags)
100168 }
100169 }
100170 #endif
100171+
100172+#ifdef CONFIG_PAX_USERCOPY_SLABS
100173+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
100174+ struct kmem_cache *s = kmalloc_caches[i];
100175+
100176+ if (s) {
100177+ int size = kmalloc_size(i);
100178+ char *n = kasprintf(GFP_NOWAIT,
100179+ "usercopy-kmalloc-%d", size);
100180+
100181+ BUG_ON(!n);
100182+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
100183+ size, SLAB_USERCOPY | flags);
100184+ }
100185+ }
100186+#endif
100187+
100188 }
100189 #endif /* !CONFIG_SLOB */
100190
100191@@ -666,6 +725,9 @@ void print_slabinfo_header(struct seq_file *m)
100192 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
100193 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
100194 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
100195+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100196+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
100197+#endif
100198 #endif
100199 seq_putc(m, '\n');
100200 }
100201diff --git a/mm/slob.c b/mm/slob.c
100202index 21980e0..975f1bf 100644
100203--- a/mm/slob.c
100204+++ b/mm/slob.c
100205@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
100206 /*
100207 * Return the size of a slob block.
100208 */
100209-static slobidx_t slob_units(slob_t *s)
100210+static slobidx_t slob_units(const slob_t *s)
100211 {
100212 if (s->units > 0)
100213 return s->units;
100214@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
100215 /*
100216 * Return the next free slob block pointer after this one.
100217 */
100218-static slob_t *slob_next(slob_t *s)
100219+static slob_t *slob_next(const slob_t *s)
100220 {
100221 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
100222 slobidx_t next;
100223@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
100224 /*
100225 * Returns true if s is the last free block in its page.
100226 */
100227-static int slob_last(slob_t *s)
100228+static int slob_last(const slob_t *s)
100229 {
100230 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
100231 }
100232
100233-static void *slob_new_pages(gfp_t gfp, int order, int node)
100234+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
100235 {
100236- void *page;
100237+ struct page *page;
100238
100239 #ifdef CONFIG_NUMA
100240 if (node != NUMA_NO_NODE)
100241@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
100242 if (!page)
100243 return NULL;
100244
100245- return page_address(page);
100246+ __SetPageSlab(page);
100247+ return page;
100248 }
100249
100250-static void slob_free_pages(void *b, int order)
100251+static void slob_free_pages(struct page *sp, int order)
100252 {
100253 if (current->reclaim_state)
100254 current->reclaim_state->reclaimed_slab += 1 << order;
100255- free_pages((unsigned long)b, order);
100256+ __ClearPageSlab(sp);
100257+ page_mapcount_reset(sp);
100258+ sp->private = 0;
100259+ __free_pages(sp, order);
100260 }
100261
100262 /*
100263@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100264
100265 /* Not enough space: must allocate a new page */
100266 if (!b) {
100267- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100268- if (!b)
100269+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
100270+ if (!sp)
100271 return NULL;
100272- sp = virt_to_page(b);
100273- __SetPageSlab(sp);
100274+ b = page_address(sp);
100275
100276 spin_lock_irqsave(&slob_lock, flags);
100277 sp->units = SLOB_UNITS(PAGE_SIZE);
100278 sp->freelist = b;
100279+ sp->private = 0;
100280 INIT_LIST_HEAD(&sp->lru);
100281 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
100282 set_slob_page_free(sp, slob_list);
100283@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
100284 /*
100285 * slob_free: entry point into the slob allocator.
100286 */
100287-static void slob_free(void *block, int size)
100288+static void slob_free(struct kmem_cache *c, void *block, int size)
100289 {
100290 struct page *sp;
100291 slob_t *prev, *next, *b = (slob_t *)block;
100292@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
100293 if (slob_page_free(sp))
100294 clear_slob_page_free(sp);
100295 spin_unlock_irqrestore(&slob_lock, flags);
100296- __ClearPageSlab(sp);
100297- page_mapcount_reset(sp);
100298- slob_free_pages(b, 0);
100299+ slob_free_pages(sp, 0);
100300 return;
100301 }
100302
100303+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100304+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
100305+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
100306+#endif
100307+
100308 if (!slob_page_free(sp)) {
100309 /* This slob page is about to become partially free. Easy! */
100310 sp->units = units;
100311@@ -424,11 +431,10 @@ out:
100312 */
100313
100314 static __always_inline void *
100315-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100316+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
100317 {
100318- unsigned int *m;
100319- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100320- void *ret;
100321+ slob_t *m;
100322+ void *ret = NULL;
100323
100324 gfp &= gfp_allowed_mask;
100325
100326@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100327
100328 if (!m)
100329 return NULL;
100330- *m = size;
100331+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
100332+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
100333+ m[0].units = size;
100334+ m[1].units = align;
100335 ret = (void *)m + align;
100336
100337 trace_kmalloc_node(caller, ret,
100338 size, size + align, gfp, node);
100339 } else {
100340 unsigned int order = get_order(size);
100341+ struct page *page;
100342
100343 if (likely(order))
100344 gfp |= __GFP_COMP;
100345- ret = slob_new_pages(gfp, order, node);
100346+ page = slob_new_pages(gfp, order, node);
100347+ if (page) {
100348+ ret = page_address(page);
100349+ page->private = size;
100350+ }
100351
100352 trace_kmalloc_node(caller, ret,
100353 size, PAGE_SIZE << order, gfp, node);
100354 }
100355
100356- kmemleak_alloc(ret, size, 1, gfp);
100357+ return ret;
100358+}
100359+
100360+static __always_inline void *
100361+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
100362+{
100363+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100364+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
100365+
100366+ if (!ZERO_OR_NULL_PTR(ret))
100367+ kmemleak_alloc(ret, size, 1, gfp);
100368 return ret;
100369 }
100370
100371@@ -493,34 +517,112 @@ void kfree(const void *block)
100372 return;
100373 kmemleak_free(block);
100374
100375+ VM_BUG_ON(!virt_addr_valid(block));
100376 sp = virt_to_page(block);
100377- if (PageSlab(sp)) {
100378+ VM_BUG_ON(!PageSlab(sp));
100379+ if (!sp->private) {
100380 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100381- unsigned int *m = (unsigned int *)(block - align);
100382- slob_free(m, *m + align);
100383- } else
100384+ slob_t *m = (slob_t *)(block - align);
100385+ slob_free(NULL, m, m[0].units + align);
100386+ } else {
100387+ __ClearPageSlab(sp);
100388+ page_mapcount_reset(sp);
100389+ sp->private = 0;
100390 __free_pages(sp, compound_order(sp));
100391+ }
100392 }
100393 EXPORT_SYMBOL(kfree);
100394
100395+bool is_usercopy_object(const void *ptr)
100396+{
100397+ if (!slab_is_available())
100398+ return false;
100399+
100400+ // PAX: TODO
100401+
100402+ return false;
100403+}
100404+
100405+#ifdef CONFIG_PAX_USERCOPY
100406+const char *check_heap_object(const void *ptr, unsigned long n)
100407+{
100408+ struct page *page;
100409+ const slob_t *free;
100410+ const void *base;
100411+ unsigned long flags;
100412+
100413+ if (ZERO_OR_NULL_PTR(ptr))
100414+ return "<null>";
100415+
100416+ if (!virt_addr_valid(ptr))
100417+ return NULL;
100418+
100419+ page = virt_to_head_page(ptr);
100420+ if (!PageSlab(page))
100421+ return NULL;
100422+
100423+ if (page->private) {
100424+ base = page;
100425+ if (base <= ptr && n <= page->private - (ptr - base))
100426+ return NULL;
100427+ return "<slob>";
100428+ }
100429+
100430+ /* some tricky double walking to find the chunk */
100431+ spin_lock_irqsave(&slob_lock, flags);
100432+ base = (void *)((unsigned long)ptr & PAGE_MASK);
100433+ free = page->freelist;
100434+
100435+ while (!slob_last(free) && (void *)free <= ptr) {
100436+ base = free + slob_units(free);
100437+ free = slob_next(free);
100438+ }
100439+
100440+ while (base < (void *)free) {
100441+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
100442+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
100443+ int offset;
100444+
100445+ if (ptr < base + align)
100446+ break;
100447+
100448+ offset = ptr - base - align;
100449+ if (offset >= m) {
100450+ base += size;
100451+ continue;
100452+ }
100453+
100454+ if (n > m - offset)
100455+ break;
100456+
100457+ spin_unlock_irqrestore(&slob_lock, flags);
100458+ return NULL;
100459+ }
100460+
100461+ spin_unlock_irqrestore(&slob_lock, flags);
100462+ return "<slob>";
100463+}
100464+#endif
100465+
100466 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
100467 size_t ksize(const void *block)
100468 {
100469 struct page *sp;
100470 int align;
100471- unsigned int *m;
100472+ slob_t *m;
100473
100474 BUG_ON(!block);
100475 if (unlikely(block == ZERO_SIZE_PTR))
100476 return 0;
100477
100478 sp = virt_to_page(block);
100479- if (unlikely(!PageSlab(sp)))
100480- return PAGE_SIZE << compound_order(sp);
100481+ VM_BUG_ON(!PageSlab(sp));
100482+ if (sp->private)
100483+ return sp->private;
100484
100485 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
100486- m = (unsigned int *)(block - align);
100487- return SLOB_UNITS(*m) * SLOB_UNIT;
100488+ m = (slob_t *)(block - align);
100489+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
100490 }
100491 EXPORT_SYMBOL(ksize);
100492
100493@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
100494
100495 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
100496 {
100497- void *b;
100498+ void *b = NULL;
100499
100500 flags &= gfp_allowed_mask;
100501
100502 lockdep_trace_alloc(flags);
100503
100504+#ifdef CONFIG_PAX_USERCOPY_SLABS
100505+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
100506+#else
100507 if (c->size < PAGE_SIZE) {
100508 b = slob_alloc(c->size, flags, c->align, node);
100509 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100510 SLOB_UNITS(c->size) * SLOB_UNIT,
100511 flags, node);
100512 } else {
100513- b = slob_new_pages(flags, get_order(c->size), node);
100514+ struct page *sp;
100515+
100516+ sp = slob_new_pages(flags, get_order(c->size), node);
100517+ if (sp) {
100518+ b = page_address(sp);
100519+ sp->private = c->size;
100520+ }
100521 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
100522 PAGE_SIZE << get_order(c->size),
100523 flags, node);
100524 }
100525+#endif
100526
100527 if (b && c->ctor)
100528 c->ctor(b);
100529@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
100530 EXPORT_SYMBOL(kmem_cache_alloc_node);
100531 #endif
100532
100533-static void __kmem_cache_free(void *b, int size)
100534+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
100535 {
100536- if (size < PAGE_SIZE)
100537- slob_free(b, size);
100538+ struct page *sp;
100539+
100540+ sp = virt_to_page(b);
100541+ BUG_ON(!PageSlab(sp));
100542+ if (!sp->private)
100543+ slob_free(c, b, size);
100544 else
100545- slob_free_pages(b, get_order(size));
100546+ slob_free_pages(sp, get_order(size));
100547 }
100548
100549 static void kmem_rcu_free(struct rcu_head *head)
100550@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
100551 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
100552 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
100553
100554- __kmem_cache_free(b, slob_rcu->size);
100555+ __kmem_cache_free(NULL, b, slob_rcu->size);
100556 }
100557
100558 void kmem_cache_free(struct kmem_cache *c, void *b)
100559 {
100560+ int size = c->size;
100561+
100562+#ifdef CONFIG_PAX_USERCOPY_SLABS
100563+ if (size + c->align < PAGE_SIZE) {
100564+ size += c->align;
100565+ b -= c->align;
100566+ }
100567+#endif
100568+
100569 kmemleak_free_recursive(b, c->flags);
100570 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
100571 struct slob_rcu *slob_rcu;
100572- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
100573- slob_rcu->size = c->size;
100574+ slob_rcu = b + (size - sizeof(struct slob_rcu));
100575+ slob_rcu->size = size;
100576 call_rcu(&slob_rcu->head, kmem_rcu_free);
100577 } else {
100578- __kmem_cache_free(b, c->size);
100579+ __kmem_cache_free(c, b, size);
100580 }
100581
100582+#ifdef CONFIG_PAX_USERCOPY_SLABS
100583+ trace_kfree(_RET_IP_, b);
100584+#else
100585 trace_kmem_cache_free(_RET_IP_, b);
100586+#endif
100587+
100588 }
100589 EXPORT_SYMBOL(kmem_cache_free);
100590
100591diff --git a/mm/slub.c b/mm/slub.c
100592index 3e8afcc..d6e2c89 100644
100593--- a/mm/slub.c
100594+++ b/mm/slub.c
100595@@ -207,7 +207,7 @@ struct track {
100596
100597 enum track_item { TRACK_ALLOC, TRACK_FREE };
100598
100599-#ifdef CONFIG_SYSFS
100600+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100601 static int sysfs_slab_add(struct kmem_cache *);
100602 static int sysfs_slab_alias(struct kmem_cache *, const char *);
100603 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
100604@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t)
100605 if (!t->addr)
100606 return;
100607
100608- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
100609+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
100610 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
100611 #ifdef CONFIG_STACKTRACE
100612 {
100613@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
100614
100615 slab_free_hook(s, x);
100616
100617+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100618+ if (!(s->flags & SLAB_NO_SANITIZE)) {
100619+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
100620+ if (s->ctor)
100621+ s->ctor(x);
100622+ }
100623+#endif
100624+
100625 redo:
100626 /*
100627 * Determine the currently cpus per cpu slab.
100628@@ -2710,7 +2718,7 @@ static int slub_min_objects;
100629 * Merge control. If this is set then no merging of slab caches will occur.
100630 * (Could be removed. This was introduced to pacify the merge skeptics.)
100631 */
100632-static int slub_nomerge;
100633+static int slub_nomerge = 1;
100634
100635 /*
100636 * Calculate the order of allocation given an slab object size.
100637@@ -2986,6 +2994,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
100638 s->inuse = size;
100639
100640 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
100641+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100642+ (!(flags & SLAB_NO_SANITIZE)) ||
100643+#endif
100644 s->ctor)) {
100645 /*
100646 * Relocate free pointer after the object if it is not
100647@@ -3313,6 +3324,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
100648 EXPORT_SYMBOL(__kmalloc_node);
100649 #endif
100650
100651+bool is_usercopy_object(const void *ptr)
100652+{
100653+ struct page *page;
100654+ struct kmem_cache *s;
100655+
100656+ if (ZERO_OR_NULL_PTR(ptr))
100657+ return false;
100658+
100659+ if (!slab_is_available())
100660+ return false;
100661+
100662+ if (!virt_addr_valid(ptr))
100663+ return false;
100664+
100665+ page = virt_to_head_page(ptr);
100666+
100667+ if (!PageSlab(page))
100668+ return false;
100669+
100670+ s = page->slab_cache;
100671+ return s->flags & SLAB_USERCOPY;
100672+}
100673+
100674+#ifdef CONFIG_PAX_USERCOPY
100675+const char *check_heap_object(const void *ptr, unsigned long n)
100676+{
100677+ struct page *page;
100678+ struct kmem_cache *s;
100679+ unsigned long offset;
100680+
100681+ if (ZERO_OR_NULL_PTR(ptr))
100682+ return "<null>";
100683+
100684+ if (!virt_addr_valid(ptr))
100685+ return NULL;
100686+
100687+ page = virt_to_head_page(ptr);
100688+
100689+ if (!PageSlab(page))
100690+ return NULL;
100691+
100692+ s = page->slab_cache;
100693+ if (!(s->flags & SLAB_USERCOPY))
100694+ return s->name;
100695+
100696+ offset = (ptr - page_address(page)) % s->size;
100697+ if (offset <= s->object_size && n <= s->object_size - offset)
100698+ return NULL;
100699+
100700+ return s->name;
100701+}
100702+#endif
100703+
100704 size_t ksize(const void *object)
100705 {
100706 struct page *page;
100707@@ -3341,6 +3405,7 @@ void kfree(const void *x)
100708 if (unlikely(ZERO_OR_NULL_PTR(x)))
100709 return;
100710
100711+ VM_BUG_ON(!virt_addr_valid(x));
100712 page = virt_to_head_page(x);
100713 if (unlikely(!PageSlab(page))) {
100714 BUG_ON(!PageCompound(page));
100715@@ -3642,7 +3707,7 @@ static int slab_unmergeable(struct kmem_cache *s)
100716 /*
100717 * We may have set a slab to be unmergeable during bootstrap.
100718 */
100719- if (s->refcount < 0)
100720+ if (atomic_read(&s->refcount) < 0)
100721 return 1;
100722
100723 return 0;
100724@@ -3699,7 +3764,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100725 int i;
100726 struct kmem_cache *c;
100727
100728- s->refcount++;
100729+ atomic_inc(&s->refcount);
100730
100731 /*
100732 * Adjust the object sizes so that we clear
100733@@ -3718,7 +3783,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100734 }
100735
100736 if (sysfs_slab_alias(s, name)) {
100737- s->refcount--;
100738+ atomic_dec(&s->refcount);
100739 s = NULL;
100740 }
100741 }
100742@@ -3835,7 +3900,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100743 }
100744 #endif
100745
100746-#ifdef CONFIG_SYSFS
100747+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100748 static int count_inuse(struct page *page)
100749 {
100750 return page->inuse;
100751@@ -4116,7 +4181,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100752 len += sprintf(buf + len, "%7ld ", l->count);
100753
100754 if (l->addr)
100755+#ifdef CONFIG_GRKERNSEC_HIDESYM
100756+ len += sprintf(buf + len, "%pS", NULL);
100757+#else
100758 len += sprintf(buf + len, "%pS", (void *)l->addr);
100759+#endif
100760 else
100761 len += sprintf(buf + len, "<not-available>");
100762
100763@@ -4218,12 +4287,12 @@ static void __init resiliency_test(void)
100764 validate_slab_cache(kmalloc_caches[9]);
100765 }
100766 #else
100767-#ifdef CONFIG_SYSFS
100768+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100769 static void resiliency_test(void) {};
100770 #endif
100771 #endif
100772
100773-#ifdef CONFIG_SYSFS
100774+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100775 enum slab_stat_type {
100776 SL_ALL, /* All slabs */
100777 SL_PARTIAL, /* Only partially allocated slabs */
100778@@ -4460,13 +4529,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100779 {
100780 if (!s->ctor)
100781 return 0;
100782+#ifdef CONFIG_GRKERNSEC_HIDESYM
100783+ return sprintf(buf, "%pS\n", NULL);
100784+#else
100785 return sprintf(buf, "%pS\n", s->ctor);
100786+#endif
100787 }
100788 SLAB_ATTR_RO(ctor);
100789
100790 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100791 {
100792- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100793+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100794 }
100795 SLAB_ATTR_RO(aliases);
100796
100797@@ -4554,6 +4627,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100798 SLAB_ATTR_RO(cache_dma);
100799 #endif
100800
100801+#ifdef CONFIG_PAX_USERCOPY_SLABS
100802+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100803+{
100804+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100805+}
100806+SLAB_ATTR_RO(usercopy);
100807+#endif
100808+
100809+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100810+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100811+{
100812+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100813+}
100814+SLAB_ATTR_RO(sanitize);
100815+#endif
100816+
100817 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100818 {
100819 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100820@@ -4888,6 +4977,12 @@ static struct attribute *slab_attrs[] = {
100821 #ifdef CONFIG_ZONE_DMA
100822 &cache_dma_attr.attr,
100823 #endif
100824+#ifdef CONFIG_PAX_USERCOPY_SLABS
100825+ &usercopy_attr.attr,
100826+#endif
100827+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100828+ &sanitize_attr.attr,
100829+#endif
100830 #ifdef CONFIG_NUMA
100831 &remote_node_defrag_ratio_attr.attr,
100832 #endif
100833@@ -5132,6 +5227,7 @@ static char *create_unique_id(struct kmem_cache *s)
100834 return name;
100835 }
100836
100837+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100838 static int sysfs_slab_add(struct kmem_cache *s)
100839 {
100840 int err;
100841@@ -5205,6 +5301,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100842 kobject_del(&s->kobj);
100843 kobject_put(&s->kobj);
100844 }
100845+#endif
100846
100847 /*
100848 * Need to buffer aliases during bootup until sysfs becomes
100849@@ -5218,6 +5315,7 @@ struct saved_alias {
100850
100851 static struct saved_alias *alias_list;
100852
100853+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100854 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100855 {
100856 struct saved_alias *al;
100857@@ -5240,6 +5338,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100858 alias_list = al;
100859 return 0;
100860 }
100861+#endif
100862
100863 static int __init slab_sysfs_init(void)
100864 {
100865diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100866index 4cba9c2..b4f9fcc 100644
100867--- a/mm/sparse-vmemmap.c
100868+++ b/mm/sparse-vmemmap.c
100869@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100870 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100871 if (!p)
100872 return NULL;
100873- pud_populate(&init_mm, pud, p);
100874+ pud_populate_kernel(&init_mm, pud, p);
100875 }
100876 return pud;
100877 }
100878@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100879 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100880 if (!p)
100881 return NULL;
100882- pgd_populate(&init_mm, pgd, p);
100883+ pgd_populate_kernel(&init_mm, pgd, p);
100884 }
100885 return pgd;
100886 }
100887diff --git a/mm/sparse.c b/mm/sparse.c
100888index d1b48b6..6e8590e 100644
100889--- a/mm/sparse.c
100890+++ b/mm/sparse.c
100891@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100892
100893 for (i = 0; i < PAGES_PER_SECTION; i++) {
100894 if (PageHWPoison(&memmap[i])) {
100895- atomic_long_sub(1, &num_poisoned_pages);
100896+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100897 ClearPageHWPoison(&memmap[i]);
100898 }
100899 }
100900diff --git a/mm/swap.c b/mm/swap.c
100901index 6b2dc38..46b79ba 100644
100902--- a/mm/swap.c
100903+++ b/mm/swap.c
100904@@ -31,6 +31,7 @@
100905 #include <linux/memcontrol.h>
100906 #include <linux/gfp.h>
100907 #include <linux/uio.h>
100908+#include <linux/hugetlb.h>
100909
100910 #include "internal.h"
100911
100912@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100913
100914 __page_cache_release(page);
100915 dtor = get_compound_page_dtor(page);
100916+ if (!PageHuge(page))
100917+ BUG_ON(dtor != free_compound_page);
100918 (*dtor)(page);
100919 }
100920
100921diff --git a/mm/swapfile.c b/mm/swapfile.c
100922index 8798b2e..348f9dd 100644
100923--- a/mm/swapfile.c
100924+++ b/mm/swapfile.c
100925@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100926
100927 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100928 /* Activity counter to indicate that a swapon or swapoff has occurred */
100929-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100930+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100931
100932 static inline unsigned char swap_count(unsigned char ent)
100933 {
100934@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100935 spin_unlock(&swap_lock);
100936
100937 err = 0;
100938- atomic_inc(&proc_poll_event);
100939+ atomic_inc_unchecked(&proc_poll_event);
100940 wake_up_interruptible(&proc_poll_wait);
100941
100942 out_dput:
100943@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100944
100945 poll_wait(file, &proc_poll_wait, wait);
100946
100947- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100948- seq->poll_event = atomic_read(&proc_poll_event);
100949+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100950+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100951 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100952 }
100953
100954@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100955 return ret;
100956
100957 seq = file->private_data;
100958- seq->poll_event = atomic_read(&proc_poll_event);
100959+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100960 return 0;
100961 }
100962
100963@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100964 (frontswap_map) ? "FS" : "");
100965
100966 mutex_unlock(&swapon_mutex);
100967- atomic_inc(&proc_poll_event);
100968+ atomic_inc_unchecked(&proc_poll_event);
100969 wake_up_interruptible(&proc_poll_wait);
100970
100971 if (S_ISREG(inode->i_mode))
100972diff --git a/mm/util.c b/mm/util.c
100973index 093c973..b70a268 100644
100974--- a/mm/util.c
100975+++ b/mm/util.c
100976@@ -202,6 +202,12 @@ done:
100977 void arch_pick_mmap_layout(struct mm_struct *mm)
100978 {
100979 mm->mmap_base = TASK_UNMAPPED_BASE;
100980+
100981+#ifdef CONFIG_PAX_RANDMMAP
100982+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100983+ mm->mmap_base += mm->delta_mmap;
100984+#endif
100985+
100986 mm->get_unmapped_area = arch_get_unmapped_area;
100987 }
100988 #endif
100989@@ -378,6 +384,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100990 if (!mm->arg_end)
100991 goto out_mm; /* Shh! No looking before we're done */
100992
100993+ if (gr_acl_handle_procpidmem(task))
100994+ goto out_mm;
100995+
100996 len = mm->arg_end - mm->arg_start;
100997
100998 if (len > buflen)
100999diff --git a/mm/vmalloc.c b/mm/vmalloc.c
101000index 2b0aa54..b451f74 100644
101001--- a/mm/vmalloc.c
101002+++ b/mm/vmalloc.c
101003@@ -40,6 +40,21 @@ struct vfree_deferred {
101004 };
101005 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
101006
101007+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101008+struct stack_deferred_llist {
101009+ struct llist_head list;
101010+ void *stack;
101011+ void *lowmem_stack;
101012+};
101013+
101014+struct stack_deferred {
101015+ struct stack_deferred_llist list;
101016+ struct work_struct wq;
101017+};
101018+
101019+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
101020+#endif
101021+
101022 static void __vunmap(const void *, int);
101023
101024 static void free_work(struct work_struct *w)
101025@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
101026 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
101027 struct llist_node *llnode = llist_del_all(&p->list);
101028 while (llnode) {
101029- void *p = llnode;
101030+ void *x = llnode;
101031 llnode = llist_next(llnode);
101032- __vunmap(p, 1);
101033+ __vunmap(x, 1);
101034 }
101035 }
101036
101037+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101038+static void unmap_work(struct work_struct *w)
101039+{
101040+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
101041+ struct llist_node *llnode = llist_del_all(&p->list.list);
101042+ while (llnode) {
101043+ struct stack_deferred_llist *x =
101044+ llist_entry((struct llist_head *)llnode,
101045+ struct stack_deferred_llist, list);
101046+ void *stack = ACCESS_ONCE(x->stack);
101047+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
101048+ llnode = llist_next(llnode);
101049+ __vunmap(stack, 0);
101050+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
101051+ }
101052+}
101053+#endif
101054+
101055 /*** Page table manipulation functions ***/
101056
101057 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
101058@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
101059
101060 pte = pte_offset_kernel(pmd, addr);
101061 do {
101062- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
101063- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
101064+
101065+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101066+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
101067+ BUG_ON(!pte_exec(*pte));
101068+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
101069+ continue;
101070+ }
101071+#endif
101072+
101073+ {
101074+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
101075+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
101076+ }
101077 } while (pte++, addr += PAGE_SIZE, addr != end);
101078 }
101079
101080@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
101081 pte = pte_alloc_kernel(pmd, addr);
101082 if (!pte)
101083 return -ENOMEM;
101084+
101085+ pax_open_kernel();
101086 do {
101087 struct page *page = pages[*nr];
101088
101089- if (WARN_ON(!pte_none(*pte)))
101090+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101091+ if (pgprot_val(prot) & _PAGE_NX)
101092+#endif
101093+
101094+ if (!pte_none(*pte)) {
101095+ pax_close_kernel();
101096+ WARN_ON(1);
101097 return -EBUSY;
101098- if (WARN_ON(!page))
101099+ }
101100+ if (!page) {
101101+ pax_close_kernel();
101102+ WARN_ON(1);
101103 return -ENOMEM;
101104+ }
101105 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
101106 (*nr)++;
101107 } while (pte++, addr += PAGE_SIZE, addr != end);
101108+ pax_close_kernel();
101109 return 0;
101110 }
101111
101112@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
101113 pmd_t *pmd;
101114 unsigned long next;
101115
101116- pmd = pmd_alloc(&init_mm, pud, addr);
101117+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
101118 if (!pmd)
101119 return -ENOMEM;
101120 do {
101121@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
101122 pud_t *pud;
101123 unsigned long next;
101124
101125- pud = pud_alloc(&init_mm, pgd, addr);
101126+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
101127 if (!pud)
101128 return -ENOMEM;
101129 do {
101130@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
101131 if (addr >= MODULES_VADDR && addr < MODULES_END)
101132 return 1;
101133 #endif
101134+
101135+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
101136+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
101137+ return 1;
101138+#endif
101139+
101140 return is_vmalloc_addr(x);
101141 }
101142
101143@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
101144
101145 if (!pgd_none(*pgd)) {
101146 pud_t *pud = pud_offset(pgd, addr);
101147+#ifdef CONFIG_X86
101148+ if (!pud_large(*pud))
101149+#endif
101150 if (!pud_none(*pud)) {
101151 pmd_t *pmd = pmd_offset(pud, addr);
101152+#ifdef CONFIG_X86
101153+ if (!pmd_large(*pmd))
101154+#endif
101155 if (!pmd_none(*pmd)) {
101156 pte_t *ptep, pte;
101157
101158@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
101159 for_each_possible_cpu(i) {
101160 struct vmap_block_queue *vbq;
101161 struct vfree_deferred *p;
101162+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101163+ struct stack_deferred *p2;
101164+#endif
101165
101166 vbq = &per_cpu(vmap_block_queue, i);
101167 spin_lock_init(&vbq->lock);
101168 INIT_LIST_HEAD(&vbq->free);
101169+
101170 p = &per_cpu(vfree_deferred, i);
101171 init_llist_head(&p->list);
101172 INIT_WORK(&p->wq, free_work);
101173+
101174+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101175+ p2 = &per_cpu(stack_deferred, i);
101176+ init_llist_head(&p2->list.list);
101177+ INIT_WORK(&p2->wq, unmap_work);
101178+#endif
101179 }
101180
101181 /* Import existing vmlist entries. */
101182@@ -1314,6 +1393,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
101183 struct vm_struct *area;
101184
101185 BUG_ON(in_interrupt());
101186+
101187+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101188+ if (flags & VM_KERNEXEC) {
101189+ if (start != VMALLOC_START || end != VMALLOC_END)
101190+ return NULL;
101191+ start = (unsigned long)MODULES_EXEC_VADDR;
101192+ end = (unsigned long)MODULES_EXEC_END;
101193+ }
101194+#endif
101195+
101196 if (flags & VM_IOREMAP)
101197 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
101198
101199@@ -1519,6 +1608,23 @@ void vunmap(const void *addr)
101200 }
101201 EXPORT_SYMBOL(vunmap);
101202
101203+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
101204+void unmap_process_stacks(struct task_struct *task)
101205+{
101206+ if (unlikely(in_interrupt())) {
101207+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
101208+ struct stack_deferred_llist *list = task->stack;
101209+ list->stack = task->stack;
101210+ list->lowmem_stack = task->lowmem_stack;
101211+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
101212+ schedule_work(&p->wq);
101213+ } else {
101214+ __vunmap(task->stack, 0);
101215+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
101216+ }
101217+}
101218+#endif
101219+
101220 /**
101221 * vmap - map an array of pages into virtually contiguous space
101222 * @pages: array of page pointers
101223@@ -1539,6 +1645,11 @@ void *vmap(struct page **pages, unsigned int count,
101224 if (count > totalram_pages)
101225 return NULL;
101226
101227+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101228+ if (!(pgprot_val(prot) & _PAGE_NX))
101229+ flags |= VM_KERNEXEC;
101230+#endif
101231+
101232 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
101233 __builtin_return_address(0));
101234 if (!area)
101235@@ -1641,6 +1752,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
101236 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
101237 goto fail;
101238
101239+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
101240+ if (!(pgprot_val(prot) & _PAGE_NX))
101241+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
101242+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
101243+ else
101244+#endif
101245+
101246 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
101247 start, end, node, gfp_mask, caller);
101248 if (!area)
101249@@ -1817,10 +1935,9 @@ EXPORT_SYMBOL(vzalloc_node);
101250 * For tight control over page level allocator and protection flags
101251 * use __vmalloc() instead.
101252 */
101253-
101254 void *vmalloc_exec(unsigned long size)
101255 {
101256- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
101257+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
101258 NUMA_NO_NODE, __builtin_return_address(0));
101259 }
101260
101261@@ -2127,6 +2244,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
101262 {
101263 struct vm_struct *area;
101264
101265+ BUG_ON(vma->vm_mirror);
101266+
101267 size = PAGE_ALIGN(size);
101268
101269 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
101270@@ -2609,7 +2728,11 @@ static int s_show(struct seq_file *m, void *p)
101271 v->addr, v->addr + v->size, v->size);
101272
101273 if (v->caller)
101274+#ifdef CONFIG_GRKERNSEC_HIDESYM
101275+ seq_printf(m, " %pK", v->caller);
101276+#else
101277 seq_printf(m, " %pS", v->caller);
101278+#endif
101279
101280 if (v->nr_pages)
101281 seq_printf(m, " pages=%d", v->nr_pages);
101282diff --git a/mm/vmstat.c b/mm/vmstat.c
101283index e9ab104..de275bd 100644
101284--- a/mm/vmstat.c
101285+++ b/mm/vmstat.c
101286@@ -20,6 +20,7 @@
101287 #include <linux/writeback.h>
101288 #include <linux/compaction.h>
101289 #include <linux/mm_inline.h>
101290+#include <linux/grsecurity.h>
101291
101292 #include "internal.h"
101293
101294@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
101295 *
101296 * vm_stat contains the global counters
101297 */
101298-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101299+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
101300 EXPORT_SYMBOL(vm_stat);
101301
101302 #ifdef CONFIG_SMP
101303@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
101304
101305 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101306 if (diff[i])
101307- atomic_long_add(diff[i], &vm_stat[i]);
101308+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
101309 }
101310
101311 /*
101312@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
101313 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
101314 if (v) {
101315
101316- atomic_long_add(v, &zone->vm_stat[i]);
101317+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101318 global_diff[i] += v;
101319 #ifdef CONFIG_NUMA
101320 /* 3 seconds idle till flush */
101321@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
101322
101323 v = p->vm_stat_diff[i];
101324 p->vm_stat_diff[i] = 0;
101325- atomic_long_add(v, &zone->vm_stat[i]);
101326+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101327 global_diff[i] += v;
101328 }
101329 }
101330@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
101331 if (pset->vm_stat_diff[i]) {
101332 int v = pset->vm_stat_diff[i];
101333 pset->vm_stat_diff[i] = 0;
101334- atomic_long_add(v, &zone->vm_stat[i]);
101335- atomic_long_add(v, &vm_stat[i]);
101336+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
101337+ atomic_long_add_unchecked(v, &vm_stat[i]);
101338 }
101339 }
101340 #endif
101341@@ -1163,10 +1164,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
101342 stat_items_size += sizeof(struct vm_event_state);
101343 #endif
101344
101345- v = kmalloc(stat_items_size, GFP_KERNEL);
101346+ v = kzalloc(stat_items_size, GFP_KERNEL);
101347 m->private = v;
101348 if (!v)
101349 return ERR_PTR(-ENOMEM);
101350+
101351+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101352+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101353+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
101354+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
101355+ && !in_group_p(grsec_proc_gid)
101356+#endif
101357+ )
101358+ return (unsigned long *)m->private + *pos;
101359+#endif
101360+#endif
101361+
101362 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
101363 v[i] = global_page_state(i);
101364 v += NR_VM_ZONE_STAT_ITEMS;
101365@@ -1315,10 +1328,16 @@ static int __init setup_vmstat(void)
101366 cpu_notifier_register_done();
101367 #endif
101368 #ifdef CONFIG_PROC_FS
101369- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
101370- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
101371- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101372- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
101373+ {
101374+ mode_t gr_mode = S_IRUGO;
101375+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101376+ gr_mode = S_IRUSR;
101377+#endif
101378+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
101379+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
101380+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
101381+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
101382+ }
101383 #endif
101384 return 0;
101385 }
101386diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
101387index 64c6bed..b79a5de 100644
101388--- a/net/8021q/vlan.c
101389+++ b/net/8021q/vlan.c
101390@@ -481,7 +481,7 @@ out:
101391 return NOTIFY_DONE;
101392 }
101393
101394-static struct notifier_block vlan_notifier_block __read_mostly = {
101395+static struct notifier_block vlan_notifier_block = {
101396 .notifier_call = vlan_device_event,
101397 };
101398
101399@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
101400 err = -EPERM;
101401 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
101402 break;
101403- if ((args.u.name_type >= 0) &&
101404- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
101405+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
101406 struct vlan_net *vn;
101407
101408 vn = net_generic(net, vlan_net_id);
101409diff --git a/net/9p/client.c b/net/9p/client.c
101410index e86a9bea..e91f70e 100644
101411--- a/net/9p/client.c
101412+++ b/net/9p/client.c
101413@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
101414 len - inline_len);
101415 } else {
101416 err = copy_from_user(ename + inline_len,
101417- uidata, len - inline_len);
101418+ (char __force_user *)uidata, len - inline_len);
101419 if (err) {
101420 err = -EFAULT;
101421 goto out_err;
101422@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
101423 kernel_buf = 1;
101424 indata = data;
101425 } else
101426- indata = (__force char *)udata;
101427+ indata = (__force_kernel char *)udata;
101428 /*
101429 * response header len is 11
101430 * PDU Header(7) + IO Size (4)
101431@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
101432 kernel_buf = 1;
101433 odata = data;
101434 } else
101435- odata = (char *)udata;
101436+ odata = (char __force_kernel *)udata;
101437 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
101438 P9_ZC_HDR_SZ, kernel_buf, "dqd",
101439 fid->fid, offset, rsize);
101440diff --git a/net/9p/mod.c b/net/9p/mod.c
101441index 6ab36ae..6f1841b 100644
101442--- a/net/9p/mod.c
101443+++ b/net/9p/mod.c
101444@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
101445 void v9fs_register_trans(struct p9_trans_module *m)
101446 {
101447 spin_lock(&v9fs_trans_lock);
101448- list_add_tail(&m->list, &v9fs_trans_list);
101449+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
101450 spin_unlock(&v9fs_trans_lock);
101451 }
101452 EXPORT_SYMBOL(v9fs_register_trans);
101453@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
101454 void v9fs_unregister_trans(struct p9_trans_module *m)
101455 {
101456 spin_lock(&v9fs_trans_lock);
101457- list_del_init(&m->list);
101458+ pax_list_del_init((struct list_head *)&m->list);
101459 spin_unlock(&v9fs_trans_lock);
101460 }
101461 EXPORT_SYMBOL(v9fs_unregister_trans);
101462diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
101463index 80d08f6..de63fd1 100644
101464--- a/net/9p/trans_fd.c
101465+++ b/net/9p/trans_fd.c
101466@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
101467 oldfs = get_fs();
101468 set_fs(get_ds());
101469 /* The cast to a user pointer is valid due to the set_fs() */
101470- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
101471+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
101472 set_fs(oldfs);
101473
101474 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
101475diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
101476index af46bc4..f9adfcd 100644
101477--- a/net/appletalk/atalk_proc.c
101478+++ b/net/appletalk/atalk_proc.c
101479@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
101480 struct proc_dir_entry *p;
101481 int rc = -ENOMEM;
101482
101483- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
101484+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
101485 if (!atalk_proc_dir)
101486 goto out;
101487
101488diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
101489index 876fbe8..8bbea9f 100644
101490--- a/net/atm/atm_misc.c
101491+++ b/net/atm/atm_misc.c
101492@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
101493 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
101494 return 1;
101495 atm_return(vcc, truesize);
101496- atomic_inc(&vcc->stats->rx_drop);
101497+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101498 return 0;
101499 }
101500 EXPORT_SYMBOL(atm_charge);
101501@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
101502 }
101503 }
101504 atm_return(vcc, guess);
101505- atomic_inc(&vcc->stats->rx_drop);
101506+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101507 return NULL;
101508 }
101509 EXPORT_SYMBOL(atm_alloc_charge);
101510@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
101511
101512 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101513 {
101514-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101515+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101516 __SONET_ITEMS
101517 #undef __HANDLE_ITEM
101518 }
101519@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
101520
101521 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101522 {
101523-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101524+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
101525 __SONET_ITEMS
101526 #undef __HANDLE_ITEM
101527 }
101528diff --git a/net/atm/lec.c b/net/atm/lec.c
101529index 4b98f89..5a2f6cb 100644
101530--- a/net/atm/lec.c
101531+++ b/net/atm/lec.c
101532@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
101533 }
101534
101535 static struct lane2_ops lane2_ops = {
101536- lane2_resolve, /* resolve, spec 3.1.3 */
101537- lane2_associate_req, /* associate_req, spec 3.1.4 */
101538- NULL /* associate indicator, spec 3.1.5 */
101539+ .resolve = lane2_resolve,
101540+ .associate_req = lane2_associate_req,
101541+ .associate_indicator = NULL
101542 };
101543
101544 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
101545diff --git a/net/atm/lec.h b/net/atm/lec.h
101546index 4149db1..f2ab682 100644
101547--- a/net/atm/lec.h
101548+++ b/net/atm/lec.h
101549@@ -48,7 +48,7 @@ struct lane2_ops {
101550 const u8 *tlvs, u32 sizeoftlvs);
101551 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
101552 const u8 *tlvs, u32 sizeoftlvs);
101553-};
101554+} __no_const;
101555
101556 /*
101557 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
101558diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
101559index d1b2d9a..d549f7f 100644
101560--- a/net/atm/mpoa_caches.c
101561+++ b/net/atm/mpoa_caches.c
101562@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
101563
101564
101565 static struct in_cache_ops ingress_ops = {
101566- in_cache_add_entry, /* add_entry */
101567- in_cache_get, /* get */
101568- in_cache_get_with_mask, /* get_with_mask */
101569- in_cache_get_by_vcc, /* get_by_vcc */
101570- in_cache_put, /* put */
101571- in_cache_remove_entry, /* remove_entry */
101572- cache_hit, /* cache_hit */
101573- clear_count_and_expired, /* clear_count */
101574- check_resolving_entries, /* check_resolving */
101575- refresh_entries, /* refresh */
101576- in_destroy_cache /* destroy_cache */
101577+ .add_entry = in_cache_add_entry,
101578+ .get = in_cache_get,
101579+ .get_with_mask = in_cache_get_with_mask,
101580+ .get_by_vcc = in_cache_get_by_vcc,
101581+ .put = in_cache_put,
101582+ .remove_entry = in_cache_remove_entry,
101583+ .cache_hit = cache_hit,
101584+ .clear_count = clear_count_and_expired,
101585+ .check_resolving = check_resolving_entries,
101586+ .refresh = refresh_entries,
101587+ .destroy_cache = in_destroy_cache
101588 };
101589
101590 static struct eg_cache_ops egress_ops = {
101591- eg_cache_add_entry, /* add_entry */
101592- eg_cache_get_by_cache_id, /* get_by_cache_id */
101593- eg_cache_get_by_tag, /* get_by_tag */
101594- eg_cache_get_by_vcc, /* get_by_vcc */
101595- eg_cache_get_by_src_ip, /* get_by_src_ip */
101596- eg_cache_put, /* put */
101597- eg_cache_remove_entry, /* remove_entry */
101598- update_eg_cache_entry, /* update */
101599- clear_expired, /* clear_expired */
101600- eg_destroy_cache /* destroy_cache */
101601+ .add_entry = eg_cache_add_entry,
101602+ .get_by_cache_id = eg_cache_get_by_cache_id,
101603+ .get_by_tag = eg_cache_get_by_tag,
101604+ .get_by_vcc = eg_cache_get_by_vcc,
101605+ .get_by_src_ip = eg_cache_get_by_src_ip,
101606+ .put = eg_cache_put,
101607+ .remove_entry = eg_cache_remove_entry,
101608+ .update = update_eg_cache_entry,
101609+ .clear_expired = clear_expired,
101610+ .destroy_cache = eg_destroy_cache
101611 };
101612
101613
101614diff --git a/net/atm/proc.c b/net/atm/proc.c
101615index bbb6461..cf04016 100644
101616--- a/net/atm/proc.c
101617+++ b/net/atm/proc.c
101618@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
101619 const struct k_atm_aal_stats *stats)
101620 {
101621 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
101622- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
101623- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
101624- atomic_read(&stats->rx_drop));
101625+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
101626+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
101627+ atomic_read_unchecked(&stats->rx_drop));
101628 }
101629
101630 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
101631diff --git a/net/atm/resources.c b/net/atm/resources.c
101632index 0447d5d..3cf4728 100644
101633--- a/net/atm/resources.c
101634+++ b/net/atm/resources.c
101635@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
101636 static void copy_aal_stats(struct k_atm_aal_stats *from,
101637 struct atm_aal_stats *to)
101638 {
101639-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101640+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101641 __AAL_STAT_ITEMS
101642 #undef __HANDLE_ITEM
101643 }
101644@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
101645 static void subtract_aal_stats(struct k_atm_aal_stats *from,
101646 struct atm_aal_stats *to)
101647 {
101648-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101649+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101650 __AAL_STAT_ITEMS
101651 #undef __HANDLE_ITEM
101652 }
101653diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
101654index 919a5ce..cc6b444 100644
101655--- a/net/ax25/sysctl_net_ax25.c
101656+++ b/net/ax25/sysctl_net_ax25.c
101657@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
101658 {
101659 char path[sizeof("net/ax25/") + IFNAMSIZ];
101660 int k;
101661- struct ctl_table *table;
101662+ ctl_table_no_const *table;
101663
101664 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101665 if (!table)
101666diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101667index 1e80539..676c37a 100644
101668--- a/net/batman-adv/bat_iv_ogm.c
101669+++ b/net/batman-adv/bat_iv_ogm.c
101670@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101671
101672 /* randomize initial seqno to avoid collision */
101673 get_random_bytes(&random_seqno, sizeof(random_seqno));
101674- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101675+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101676
101677 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101678 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101679@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101680 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101681
101682 /* change sequence number to network order */
101683- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101684+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101685 batadv_ogm_packet->seqno = htonl(seqno);
101686- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101687+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101688
101689 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101690
101691@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101692 return;
101693
101694 /* could be changed by schedule_own_packet() */
101695- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101696+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101697
101698 if (ogm_packet->flags & BATADV_DIRECTLINK)
101699 has_directlink_flag = true;
101700diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101701index fc1835c..eead856 100644
101702--- a/net/batman-adv/fragmentation.c
101703+++ b/net/batman-adv/fragmentation.c
101704@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101705 frag_header.packet_type = BATADV_UNICAST_FRAG;
101706 frag_header.version = BATADV_COMPAT_VERSION;
101707 frag_header.ttl = BATADV_TTL;
101708- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101709+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101710 frag_header.reserved = 0;
101711 frag_header.no = 0;
101712 frag_header.total_size = htons(skb->len);
101713diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101714index 5467955..30cc771 100644
101715--- a/net/batman-adv/soft-interface.c
101716+++ b/net/batman-adv/soft-interface.c
101717@@ -296,7 +296,7 @@ send:
101718 primary_if->net_dev->dev_addr);
101719
101720 /* set broadcast sequence number */
101721- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101722+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101723 bcast_packet->seqno = htonl(seqno);
101724
101725 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101726@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101727 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101728
101729 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101730- atomic_set(&bat_priv->bcast_seqno, 1);
101731+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101732 atomic_set(&bat_priv->tt.vn, 0);
101733 atomic_set(&bat_priv->tt.local_changes, 0);
101734 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101735@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101736
101737 /* randomize initial seqno to avoid collision */
101738 get_random_bytes(&random_seqno, sizeof(random_seqno));
101739- atomic_set(&bat_priv->frag_seqno, random_seqno);
101740+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101741
101742 bat_priv->primary_if = NULL;
101743 bat_priv->num_ifaces = 0;
101744diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101745index 8854c05..ee5d5497 100644
101746--- a/net/batman-adv/types.h
101747+++ b/net/batman-adv/types.h
101748@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101749 struct batadv_hard_iface_bat_iv {
101750 unsigned char *ogm_buff;
101751 int ogm_buff_len;
101752- atomic_t ogm_seqno;
101753+ atomic_unchecked_t ogm_seqno;
101754 };
101755
101756 /**
101757@@ -768,7 +768,7 @@ struct batadv_priv {
101758 atomic_t bonding;
101759 atomic_t fragmentation;
101760 atomic_t packet_size_max;
101761- atomic_t frag_seqno;
101762+ atomic_unchecked_t frag_seqno;
101763 #ifdef CONFIG_BATMAN_ADV_BLA
101764 atomic_t bridge_loop_avoidance;
101765 #endif
101766@@ -787,7 +787,7 @@ struct batadv_priv {
101767 #endif
101768 uint32_t isolation_mark;
101769 uint32_t isolation_mark_mask;
101770- atomic_t bcast_seqno;
101771+ atomic_unchecked_t bcast_seqno;
101772 atomic_t bcast_queue_left;
101773 atomic_t batman_queue_left;
101774 char num_ifaces;
101775diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101776index 115f149..f0ba286 100644
101777--- a/net/bluetooth/hci_sock.c
101778+++ b/net/bluetooth/hci_sock.c
101779@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101780 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101781 }
101782
101783- len = min_t(unsigned int, len, sizeof(uf));
101784+ len = min((size_t)len, sizeof(uf));
101785 if (copy_from_user(&uf, optval, len)) {
101786 err = -EFAULT;
101787 break;
101788diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101789index 14ca8ae..262d49a 100644
101790--- a/net/bluetooth/l2cap_core.c
101791+++ b/net/bluetooth/l2cap_core.c
101792@@ -3565,8 +3565,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101793 break;
101794
101795 case L2CAP_CONF_RFC:
101796- if (olen == sizeof(rfc))
101797- memcpy(&rfc, (void *)val, olen);
101798+ if (olen != sizeof(rfc))
101799+ break;
101800+
101801+ memcpy(&rfc, (void *)val, olen);
101802
101803 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101804 rfc.mode != chan->mode)
101805diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101806index 1884f72..b3b71f9 100644
101807--- a/net/bluetooth/l2cap_sock.c
101808+++ b/net/bluetooth/l2cap_sock.c
101809@@ -629,7 +629,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101810 struct sock *sk = sock->sk;
101811 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101812 struct l2cap_options opts;
101813- int len, err = 0;
101814+ int err = 0;
101815+ size_t len = optlen;
101816 u32 opt;
101817
101818 BT_DBG("sk %p", sk);
101819@@ -656,7 +657,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101820 opts.max_tx = chan->max_tx;
101821 opts.txwin_size = chan->tx_win;
101822
101823- len = min_t(unsigned int, sizeof(opts), optlen);
101824+ len = min(sizeof(opts), len);
101825 if (copy_from_user((char *) &opts, optval, len)) {
101826 err = -EFAULT;
101827 break;
101828@@ -743,7 +744,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101829 struct bt_security sec;
101830 struct bt_power pwr;
101831 struct l2cap_conn *conn;
101832- int len, err = 0;
101833+ int err = 0;
101834+ size_t len = optlen;
101835 u32 opt;
101836
101837 BT_DBG("sk %p", sk);
101838@@ -767,7 +769,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101839
101840 sec.level = BT_SECURITY_LOW;
101841
101842- len = min_t(unsigned int, sizeof(sec), optlen);
101843+ len = min(sizeof(sec), len);
101844 if (copy_from_user((char *) &sec, optval, len)) {
101845 err = -EFAULT;
101846 break;
101847@@ -862,7 +864,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101848
101849 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101850
101851- len = min_t(unsigned int, sizeof(pwr), optlen);
101852+ len = min(sizeof(pwr), len);
101853 if (copy_from_user((char *) &pwr, optval, len)) {
101854 err = -EFAULT;
101855 break;
101856diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101857index 8bbbb5e..6fc0950 100644
101858--- a/net/bluetooth/rfcomm/sock.c
101859+++ b/net/bluetooth/rfcomm/sock.c
101860@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101861 struct sock *sk = sock->sk;
101862 struct bt_security sec;
101863 int err = 0;
101864- size_t len;
101865+ size_t len = optlen;
101866 u32 opt;
101867
101868 BT_DBG("sk %p", sk);
101869@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101870
101871 sec.level = BT_SECURITY_LOW;
101872
101873- len = min_t(unsigned int, sizeof(sec), optlen);
101874+ len = min(sizeof(sec), len);
101875 if (copy_from_user((char *) &sec, optval, len)) {
101876 err = -EFAULT;
101877 break;
101878diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101879index 8e385a0..a5bdd8e 100644
101880--- a/net/bluetooth/rfcomm/tty.c
101881+++ b/net/bluetooth/rfcomm/tty.c
101882@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101883 BT_DBG("tty %p id %d", tty, tty->index);
101884
101885 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101886- dev->channel, dev->port.count);
101887+ dev->channel, atomic_read(&dev->port.count));
101888
101889 err = tty_port_open(&dev->port, tty, filp);
101890 if (err)
101891@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101892 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101893
101894 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101895- dev->port.count);
101896+ atomic_read(&dev->port.count));
101897
101898 tty_port_close(&dev->port, tty, filp);
101899 }
101900diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101901index 6d69631..b8fdc85 100644
101902--- a/net/bridge/netfilter/ebtables.c
101903+++ b/net/bridge/netfilter/ebtables.c
101904@@ -1518,7 +1518,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101905 tmp.valid_hooks = t->table->valid_hooks;
101906 }
101907 mutex_unlock(&ebt_mutex);
101908- if (copy_to_user(user, &tmp, *len) != 0) {
101909+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101910 BUGPRINT("c2u Didn't work\n");
101911 ret = -EFAULT;
101912 break;
101913@@ -2324,7 +2324,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101914 goto out;
101915 tmp.valid_hooks = t->valid_hooks;
101916
101917- if (copy_to_user(user, &tmp, *len) != 0) {
101918+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101919 ret = -EFAULT;
101920 break;
101921 }
101922@@ -2335,7 +2335,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101923 tmp.entries_size = t->table->entries_size;
101924 tmp.valid_hooks = t->table->valid_hooks;
101925
101926- if (copy_to_user(user, &tmp, *len) != 0) {
101927+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101928 ret = -EFAULT;
101929 break;
101930 }
101931diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101932index f5afda1..dcf770a 100644
101933--- a/net/caif/cfctrl.c
101934+++ b/net/caif/cfctrl.c
101935@@ -10,6 +10,7 @@
101936 #include <linux/spinlock.h>
101937 #include <linux/slab.h>
101938 #include <linux/pkt_sched.h>
101939+#include <linux/sched.h>
101940 #include <net/caif/caif_layer.h>
101941 #include <net/caif/cfpkt.h>
101942 #include <net/caif/cfctrl.h>
101943@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101944 memset(&dev_info, 0, sizeof(dev_info));
101945 dev_info.id = 0xff;
101946 cfsrvl_init(&this->serv, 0, &dev_info, false);
101947- atomic_set(&this->req_seq_no, 1);
101948- atomic_set(&this->rsp_seq_no, 1);
101949+ atomic_set_unchecked(&this->req_seq_no, 1);
101950+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101951 this->serv.layer.receive = cfctrl_recv;
101952 sprintf(this->serv.layer.name, "ctrl");
101953 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101954@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101955 struct cfctrl_request_info *req)
101956 {
101957 spin_lock_bh(&ctrl->info_list_lock);
101958- atomic_inc(&ctrl->req_seq_no);
101959- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101960+ atomic_inc_unchecked(&ctrl->req_seq_no);
101961+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101962 list_add_tail(&req->list, &ctrl->list);
101963 spin_unlock_bh(&ctrl->info_list_lock);
101964 }
101965@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101966 if (p != first)
101967 pr_warn("Requests are not received in order\n");
101968
101969- atomic_set(&ctrl->rsp_seq_no,
101970+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101971 p->sequence_no);
101972 list_del(&p->list);
101973 goto out;
101974diff --git a/net/can/af_can.c b/net/can/af_can.c
101975index ce82337..5d17b4d 100644
101976--- a/net/can/af_can.c
101977+++ b/net/can/af_can.c
101978@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101979 };
101980
101981 /* notifier block for netdevice event */
101982-static struct notifier_block can_netdev_notifier __read_mostly = {
101983+static struct notifier_block can_netdev_notifier = {
101984 .notifier_call = can_notifier,
101985 };
101986
101987diff --git a/net/can/bcm.c b/net/can/bcm.c
101988index dcb75c0..24b1b43 100644
101989--- a/net/can/bcm.c
101990+++ b/net/can/bcm.c
101991@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
101992 }
101993
101994 /* create /proc/net/can-bcm directory */
101995- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101996+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101997 return 0;
101998 }
101999
102000diff --git a/net/can/gw.c b/net/can/gw.c
102001index 050a211..bb9fe33 100644
102002--- a/net/can/gw.c
102003+++ b/net/can/gw.c
102004@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
102005 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
102006
102007 static HLIST_HEAD(cgw_list);
102008-static struct notifier_block notifier;
102009
102010 static struct kmem_cache *cgw_cache __read_mostly;
102011
102012@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
102013 return err;
102014 }
102015
102016+static struct notifier_block notifier = {
102017+ .notifier_call = cgw_notifier
102018+};
102019+
102020 static __init int cgw_module_init(void)
102021 {
102022 /* sanitize given module parameter */
102023@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
102024 return -ENOMEM;
102025
102026 /* set notifier */
102027- notifier.notifier_call = cgw_notifier;
102028 register_netdevice_notifier(&notifier);
102029
102030 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
102031diff --git a/net/can/proc.c b/net/can/proc.c
102032index 1a19b98..df2b4ec 100644
102033--- a/net/can/proc.c
102034+++ b/net/can/proc.c
102035@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
102036 void can_init_proc(void)
102037 {
102038 /* create /proc/net/can directory */
102039- can_dir = proc_mkdir("can", init_net.proc_net);
102040+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
102041
102042 if (!can_dir) {
102043 printk(KERN_INFO "can: failed to create /proc/net/can . "
102044diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
102045index b2f571d..e6160e9 100644
102046--- a/net/ceph/messenger.c
102047+++ b/net/ceph/messenger.c
102048@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
102049 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
102050
102051 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
102052-static atomic_t addr_str_seq = ATOMIC_INIT(0);
102053+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
102054
102055 static struct page *zero_page; /* used in certain error cases */
102056
102057@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
102058 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
102059 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
102060
102061- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102062+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
102063 s = addr_str[i];
102064
102065 switch (ss->ss_family) {
102066@@ -292,7 +292,11 @@ int ceph_msgr_init(void)
102067 if (ceph_msgr_slab_init())
102068 return -ENOMEM;
102069
102070- ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
102071+ /*
102072+ * The number of active work items is limited by the number of
102073+ * connections, so leave @max_active at default.
102074+ */
102075+ ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
102076 if (ceph_msgr_wq)
102077 return 0;
102078
102079diff --git a/net/compat.c b/net/compat.c
102080index bc8aeef..f9c070c 100644
102081--- a/net/compat.c
102082+++ b/net/compat.c
102083@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
102084 return -EFAULT;
102085 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
102086 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
102087- kmsg->msg_name = compat_ptr(tmp1);
102088- kmsg->msg_iov = compat_ptr(tmp2);
102089- kmsg->msg_control = compat_ptr(tmp3);
102090+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
102091+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
102092+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
102093 return 0;
102094 }
102095
102096@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102097
102098 if (kern_msg->msg_name && kern_msg->msg_namelen) {
102099 if (mode == VERIFY_READ) {
102100- int err = move_addr_to_kernel(kern_msg->msg_name,
102101+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
102102 kern_msg->msg_namelen,
102103 kern_address);
102104 if (err < 0)
102105@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102106 }
102107
102108 tot_len = iov_from_user_compat_to_kern(kern_iov,
102109- (struct compat_iovec __user *)kern_msg->msg_iov,
102110+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
102111 kern_msg->msg_iovlen);
102112 if (tot_len >= 0)
102113 kern_msg->msg_iov = kern_iov;
102114@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
102115
102116 #define CMSG_COMPAT_FIRSTHDR(msg) \
102117 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
102118- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
102119+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
102120 (struct compat_cmsghdr __user *)NULL)
102121
102122 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
102123 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
102124 (ucmlen) <= (unsigned long) \
102125 ((mhdr)->msg_controllen - \
102126- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
102127+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
102128
102129 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
102130 struct compat_cmsghdr __user *cmsg, int cmsg_len)
102131 {
102132 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
102133- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
102134+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
102135 msg->msg_controllen)
102136 return NULL;
102137 return (struct compat_cmsghdr __user *)ptr;
102138@@ -223,7 +223,7 @@ Efault:
102139
102140 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
102141 {
102142- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102143+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102144 struct compat_cmsghdr cmhdr;
102145 struct compat_timeval ctv;
102146 struct compat_timespec cts[3];
102147@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
102148
102149 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
102150 {
102151- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
102152+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
102153 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
102154 int fdnum = scm->fp->count;
102155 struct file **fp = scm->fp->fp;
102156@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
102157 return -EFAULT;
102158 old_fs = get_fs();
102159 set_fs(KERNEL_DS);
102160- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
102161+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
102162 set_fs(old_fs);
102163
102164 return err;
102165@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
102166 len = sizeof(ktime);
102167 old_fs = get_fs();
102168 set_fs(KERNEL_DS);
102169- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
102170+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
102171 set_fs(old_fs);
102172
102173 if (!err) {
102174@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102175 case MCAST_JOIN_GROUP:
102176 case MCAST_LEAVE_GROUP:
102177 {
102178- struct compat_group_req __user *gr32 = (void *)optval;
102179+ struct compat_group_req __user *gr32 = (void __user *)optval;
102180 struct group_req __user *kgr =
102181 compat_alloc_user_space(sizeof(struct group_req));
102182 u32 interface;
102183@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102184 case MCAST_BLOCK_SOURCE:
102185 case MCAST_UNBLOCK_SOURCE:
102186 {
102187- struct compat_group_source_req __user *gsr32 = (void *)optval;
102188+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
102189 struct group_source_req __user *kgsr = compat_alloc_user_space(
102190 sizeof(struct group_source_req));
102191 u32 interface;
102192@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
102193 }
102194 case MCAST_MSFILTER:
102195 {
102196- struct compat_group_filter __user *gf32 = (void *)optval;
102197+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102198 struct group_filter __user *kgf;
102199 u32 interface, fmode, numsrc;
102200
102201@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
102202 char __user *optval, int __user *optlen,
102203 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
102204 {
102205- struct compat_group_filter __user *gf32 = (void *)optval;
102206+ struct compat_group_filter __user *gf32 = (void __user *)optval;
102207 struct group_filter __user *kgf;
102208 int __user *koptlen;
102209 u32 interface, fmode, numsrc;
102210@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
102211
102212 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
102213 return -EINVAL;
102214- if (copy_from_user(a, args, nas[call]))
102215+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
102216 return -EFAULT;
102217 a0 = a[0];
102218 a1 = a[1];
102219diff --git a/net/core/datagram.c b/net/core/datagram.c
102220index fdbc9a8..cd6972c 100644
102221--- a/net/core/datagram.c
102222+++ b/net/core/datagram.c
102223@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
102224 }
102225
102226 kfree_skb(skb);
102227- atomic_inc(&sk->sk_drops);
102228+ atomic_inc_unchecked(&sk->sk_drops);
102229 sk_mem_reclaim_partial(sk);
102230
102231 return err;
102232diff --git a/net/core/dev.c b/net/core/dev.c
102233index cf8a95f..2837211 100644
102234--- a/net/core/dev.c
102235+++ b/net/core/dev.c
102236@@ -1683,14 +1683,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
102237 {
102238 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
102239 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
102240- atomic_long_inc(&dev->rx_dropped);
102241+ atomic_long_inc_unchecked(&dev->rx_dropped);
102242 kfree_skb(skb);
102243 return NET_RX_DROP;
102244 }
102245 }
102246
102247 if (unlikely(!is_skb_forwardable(dev, skb))) {
102248- atomic_long_inc(&dev->rx_dropped);
102249+ atomic_long_inc_unchecked(&dev->rx_dropped);
102250 kfree_skb(skb);
102251 return NET_RX_DROP;
102252 }
102253@@ -2487,7 +2487,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
102254
102255 struct dev_gso_cb {
102256 void (*destructor)(struct sk_buff *skb);
102257-};
102258+} __no_const;
102259
102260 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
102261
102262@@ -2952,7 +2952,7 @@ recursion_alert:
102263 rc = -ENETDOWN;
102264 rcu_read_unlock_bh();
102265
102266- atomic_long_inc(&dev->tx_dropped);
102267+ atomic_long_inc_unchecked(&dev->tx_dropped);
102268 kfree_skb(skb);
102269 return rc;
102270 out:
102271@@ -3296,7 +3296,7 @@ enqueue:
102272
102273 local_irq_restore(flags);
102274
102275- atomic_long_inc(&skb->dev->rx_dropped);
102276+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102277 kfree_skb(skb);
102278 return NET_RX_DROP;
102279 }
102280@@ -3373,7 +3373,7 @@ int netif_rx_ni(struct sk_buff *skb)
102281 }
102282 EXPORT_SYMBOL(netif_rx_ni);
102283
102284-static void net_tx_action(struct softirq_action *h)
102285+static __latent_entropy void net_tx_action(void)
102286 {
102287 struct softnet_data *sd = &__get_cpu_var(softnet_data);
102288
102289@@ -3706,7 +3706,7 @@ ncls:
102290 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
102291 } else {
102292 drop:
102293- atomic_long_inc(&skb->dev->rx_dropped);
102294+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
102295 kfree_skb(skb);
102296 /* Jamal, now you will not able to escape explaining
102297 * me how you were going to use this. :-)
102298@@ -4426,7 +4426,7 @@ void netif_napi_del(struct napi_struct *napi)
102299 }
102300 EXPORT_SYMBOL(netif_napi_del);
102301
102302-static void net_rx_action(struct softirq_action *h)
102303+static __latent_entropy void net_rx_action(void)
102304 {
102305 struct softnet_data *sd = &__get_cpu_var(softnet_data);
102306 unsigned long time_limit = jiffies + 2;
102307@@ -6480,8 +6480,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
102308 } else {
102309 netdev_stats_to_stats64(storage, &dev->stats);
102310 }
102311- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
102312- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
102313+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
102314+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
102315 return storage;
102316 }
102317 EXPORT_SYMBOL(dev_get_stats);
102318diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
102319index cf999e0..c59a975 100644
102320--- a/net/core/dev_ioctl.c
102321+++ b/net/core/dev_ioctl.c
102322@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
102323 if (no_module && capable(CAP_NET_ADMIN))
102324 no_module = request_module("netdev-%s", name);
102325 if (no_module && capable(CAP_SYS_MODULE)) {
102326+#ifdef CONFIG_GRKERNSEC_MODHARDEN
102327+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
102328+#else
102329 if (!request_module("%s", name))
102330 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
102331 name);
102332+#endif
102333 }
102334 }
102335 EXPORT_SYMBOL(dev_load);
102336diff --git a/net/core/filter.c b/net/core/filter.c
102337index d814b8a..b5ab778 100644
102338--- a/net/core/filter.c
102339+++ b/net/core/filter.c
102340@@ -559,7 +559,11 @@ do_pass:
102341
102342 /* Unkown instruction. */
102343 default:
102344- goto err;
102345+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
102346+ fp->code, fp->jt, fp->jf, fp->k);
102347+ kfree(addrs);
102348+ BUG();
102349+ return -EINVAL;
102350 }
102351
102352 insn++;
102353@@ -606,7 +610,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
102354 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
102355 int pc, ret = 0;
102356
102357- BUILD_BUG_ON(BPF_MEMWORDS > 16);
102358+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
102359
102360 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
102361 if (!masks)
102362@@ -933,7 +937,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
102363
102364 /* Expand fp for appending the new filter representation. */
102365 old_fp = fp;
102366- fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
102367+ fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
102368 if (!fp) {
102369 /* The old_fp is still around in case we couldn't
102370 * allocate new memory, so uncharge on that one.
102371@@ -1013,11 +1017,11 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
102372 if (fprog->filter == NULL)
102373 return -EINVAL;
102374
102375- fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
102376+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
102377 if (!fp)
102378 return -ENOMEM;
102379
102380- memcpy(fp->insns, fprog->filter, fsize);
102381+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
102382
102383 fp->len = fprog->len;
102384 /* Since unattached filters are not copied back to user
102385@@ -1069,12 +1073,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
102386 if (fprog->filter == NULL)
102387 return -EINVAL;
102388
102389- prog = kmalloc(bpf_fsize, GFP_KERNEL);
102390+ prog = bpf_prog_alloc(bpf_fsize, 0);
102391 if (!prog)
102392 return -ENOMEM;
102393
102394 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
102395- kfree(prog);
102396+ __bpf_prog_free(prog);
102397 return -EFAULT;
102398 }
102399
102400@@ -1082,7 +1086,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
102401
102402 err = bpf_prog_store_orig_filter(prog, fprog);
102403 if (err) {
102404- kfree(prog);
102405+ __bpf_prog_free(prog);
102406 return -ENOMEM;
102407 }
102408
102409diff --git a/net/core/flow.c b/net/core/flow.c
102410index a0348fd..6951c76 100644
102411--- a/net/core/flow.c
102412+++ b/net/core/flow.c
102413@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
102414 static int flow_entry_valid(struct flow_cache_entry *fle,
102415 struct netns_xfrm *xfrm)
102416 {
102417- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
102418+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
102419 return 0;
102420 if (fle->object && !fle->object->ops->check(fle->object))
102421 return 0;
102422@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
102423 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
102424 fcp->hash_count++;
102425 }
102426- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
102427+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
102428 flo = fle->object;
102429 if (!flo)
102430 goto ret_object;
102431@@ -263,7 +263,7 @@ nocache:
102432 }
102433 flo = resolver(net, key, family, dir, flo, ctx);
102434 if (fle) {
102435- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
102436+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
102437 if (!IS_ERR(flo))
102438 fle->object = flo;
102439 else
102440diff --git a/net/core/iovec.c b/net/core/iovec.c
102441index e1ec45a..e5c6f16 100644
102442--- a/net/core/iovec.c
102443+++ b/net/core/iovec.c
102444@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
102445 if (m->msg_name && m->msg_namelen) {
102446 if (mode == VERIFY_READ) {
102447 void __user *namep;
102448- namep = (void __user __force *) m->msg_name;
102449+ namep = (void __force_user *) m->msg_name;
102450 err = move_addr_to_kernel(namep, m->msg_namelen,
102451 address);
102452 if (err < 0)
102453@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
102454 }
102455
102456 size = m->msg_iovlen * sizeof(struct iovec);
102457- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
102458+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
102459 return -EFAULT;
102460
102461 m->msg_iov = iov;
102462diff --git a/net/core/neighbour.c b/net/core/neighbour.c
102463index ef31fef..8be66d9 100644
102464--- a/net/core/neighbour.c
102465+++ b/net/core/neighbour.c
102466@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
102467 void __user *buffer, size_t *lenp, loff_t *ppos)
102468 {
102469 int size, ret;
102470- struct ctl_table tmp = *ctl;
102471+ ctl_table_no_const tmp = *ctl;
102472
102473 tmp.extra1 = &zero;
102474 tmp.extra2 = &unres_qlen_max;
102475@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
102476 void __user *buffer,
102477 size_t *lenp, loff_t *ppos)
102478 {
102479- struct ctl_table tmp = *ctl;
102480+ ctl_table_no_const tmp = *ctl;
102481 int ret;
102482
102483 tmp.extra1 = &zero;
102484diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
102485index 2bf8329..2eb1423 100644
102486--- a/net/core/net-procfs.c
102487+++ b/net/core/net-procfs.c
102488@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
102489 struct rtnl_link_stats64 temp;
102490 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
102491
102492- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102493+ if (gr_proc_is_restricted())
102494+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102495+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102496+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
102497+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
102498+ else
102499+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
102500 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
102501 dev->name, stats->rx_bytes, stats->rx_packets,
102502 stats->rx_errors,
102503@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
102504 return 0;
102505 }
102506
102507-static const struct seq_operations dev_seq_ops = {
102508+const struct seq_operations dev_seq_ops = {
102509 .start = dev_seq_start,
102510 .next = dev_seq_next,
102511 .stop = dev_seq_stop,
102512@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
102513
102514 static int softnet_seq_open(struct inode *inode, struct file *file)
102515 {
102516- return seq_open(file, &softnet_seq_ops);
102517+ return seq_open_restrict(file, &softnet_seq_ops);
102518 }
102519
102520 static const struct file_operations softnet_seq_fops = {
102521@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
102522 else
102523 seq_printf(seq, "%04x", ntohs(pt->type));
102524
102525+#ifdef CONFIG_GRKERNSEC_HIDESYM
102526+ seq_printf(seq, " %-8s %pf\n",
102527+ pt->dev ? pt->dev->name : "", NULL);
102528+#else
102529 seq_printf(seq, " %-8s %pf\n",
102530 pt->dev ? pt->dev->name : "", pt->func);
102531+#endif
102532 }
102533
102534 return 0;
102535diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
102536index 9dd0669..c52fb1b 100644
102537--- a/net/core/net-sysfs.c
102538+++ b/net/core/net-sysfs.c
102539@@ -278,7 +278,7 @@ static ssize_t carrier_changes_show(struct device *dev,
102540 {
102541 struct net_device *netdev = to_net_dev(dev);
102542 return sprintf(buf, fmt_dec,
102543- atomic_read(&netdev->carrier_changes));
102544+ atomic_read_unchecked(&netdev->carrier_changes));
102545 }
102546 static DEVICE_ATTR_RO(carrier_changes);
102547
102548diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
102549index 7c6b51a..e9dd57f 100644
102550--- a/net/core/net_namespace.c
102551+++ b/net/core/net_namespace.c
102552@@ -445,7 +445,7 @@ static int __register_pernet_operations(struct list_head *list,
102553 int error;
102554 LIST_HEAD(net_exit_list);
102555
102556- list_add_tail(&ops->list, list);
102557+ pax_list_add_tail((struct list_head *)&ops->list, list);
102558 if (ops->init || (ops->id && ops->size)) {
102559 for_each_net(net) {
102560 error = ops_init(ops, net);
102561@@ -458,7 +458,7 @@ static int __register_pernet_operations(struct list_head *list,
102562
102563 out_undo:
102564 /* If I have an error cleanup all namespaces I initialized */
102565- list_del(&ops->list);
102566+ pax_list_del((struct list_head *)&ops->list);
102567 ops_exit_list(ops, &net_exit_list);
102568 ops_free_list(ops, &net_exit_list);
102569 return error;
102570@@ -469,7 +469,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
102571 struct net *net;
102572 LIST_HEAD(net_exit_list);
102573
102574- list_del(&ops->list);
102575+ pax_list_del((struct list_head *)&ops->list);
102576 for_each_net(net)
102577 list_add_tail(&net->exit_list, &net_exit_list);
102578 ops_exit_list(ops, &net_exit_list);
102579@@ -603,7 +603,7 @@ int register_pernet_device(struct pernet_operations *ops)
102580 mutex_lock(&net_mutex);
102581 error = register_pernet_operations(&pernet_list, ops);
102582 if (!error && (first_device == &pernet_list))
102583- first_device = &ops->list;
102584+ first_device = (struct list_head *)&ops->list;
102585 mutex_unlock(&net_mutex);
102586 return error;
102587 }
102588diff --git a/net/core/netpoll.c b/net/core/netpoll.c
102589index 907fb5e..8260f040b 100644
102590--- a/net/core/netpoll.c
102591+++ b/net/core/netpoll.c
102592@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102593 struct udphdr *udph;
102594 struct iphdr *iph;
102595 struct ethhdr *eth;
102596- static atomic_t ip_ident;
102597+ static atomic_unchecked_t ip_ident;
102598 struct ipv6hdr *ip6h;
102599
102600 udp_len = len + sizeof(*udph);
102601@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102602 put_unaligned(0x45, (unsigned char *)iph);
102603 iph->tos = 0;
102604 put_unaligned(htons(ip_len), &(iph->tot_len));
102605- iph->id = htons(atomic_inc_return(&ip_ident));
102606+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
102607 iph->frag_off = 0;
102608 iph->ttl = 64;
102609 iph->protocol = IPPROTO_UDP;
102610diff --git a/net/core/pktgen.c b/net/core/pktgen.c
102611index 8b849dd..cd88bfc 100644
102612--- a/net/core/pktgen.c
102613+++ b/net/core/pktgen.c
102614@@ -3723,7 +3723,7 @@ static int __net_init pg_net_init(struct net *net)
102615 pn->net = net;
102616 INIT_LIST_HEAD(&pn->pktgen_threads);
102617 pn->pktgen_exiting = false;
102618- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
102619+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
102620 if (!pn->proc_dir) {
102621 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
102622 return -ENODEV;
102623diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
102624index f0493e3..0f43f7a 100644
102625--- a/net/core/rtnetlink.c
102626+++ b/net/core/rtnetlink.c
102627@@ -58,7 +58,7 @@ struct rtnl_link {
102628 rtnl_doit_func doit;
102629 rtnl_dumpit_func dumpit;
102630 rtnl_calcit_func calcit;
102631-};
102632+} __no_const;
102633
102634 static DEFINE_MUTEX(rtnl_mutex);
102635
102636@@ -304,10 +304,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
102637 * to use the ops for creating device. So do not
102638 * fill up dellink as well. That disables rtnl_dellink.
102639 */
102640- if (ops->setup && !ops->dellink)
102641- ops->dellink = unregister_netdevice_queue;
102642+ if (ops->setup && !ops->dellink) {
102643+ pax_open_kernel();
102644+ *(void **)&ops->dellink = unregister_netdevice_queue;
102645+ pax_close_kernel();
102646+ }
102647
102648- list_add_tail(&ops->list, &link_ops);
102649+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
102650 return 0;
102651 }
102652 EXPORT_SYMBOL_GPL(__rtnl_link_register);
102653@@ -354,7 +357,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
102654 for_each_net(net) {
102655 __rtnl_kill_links(net, ops);
102656 }
102657- list_del(&ops->list);
102658+ pax_list_del((struct list_head *)&ops->list);
102659 }
102660 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
102661
102662@@ -1014,7 +1017,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
102663 (dev->ifalias &&
102664 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
102665 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
102666- atomic_read(&dev->carrier_changes)))
102667+ atomic_read_unchecked(&dev->carrier_changes)))
102668 goto nla_put_failure;
102669
102670 if (1) {
102671diff --git a/net/core/scm.c b/net/core/scm.c
102672index b442e7e..6f5b5a2 100644
102673--- a/net/core/scm.c
102674+++ b/net/core/scm.c
102675@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
102676 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102677 {
102678 struct cmsghdr __user *cm
102679- = (__force struct cmsghdr __user *)msg->msg_control;
102680+ = (struct cmsghdr __force_user *)msg->msg_control;
102681 struct cmsghdr cmhdr;
102682 int cmlen = CMSG_LEN(len);
102683 int err;
102684@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102685 err = -EFAULT;
102686 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
102687 goto out;
102688- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
102689+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
102690 goto out;
102691 cmlen = CMSG_SPACE(len);
102692 if (msg->msg_controllen < cmlen)
102693@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
102694 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102695 {
102696 struct cmsghdr __user *cm
102697- = (__force struct cmsghdr __user*)msg->msg_control;
102698+ = (struct cmsghdr __force_user *)msg->msg_control;
102699
102700 int fdmax = 0;
102701 int fdnum = scm->fp->count;
102702@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102703 if (fdnum < fdmax)
102704 fdmax = fdnum;
102705
102706- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
102707+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
102708 i++, cmfptr++)
102709 {
102710 struct socket *sock;
102711diff --git a/net/core/skbuff.c b/net/core/skbuff.c
102712index 8d28969..4d36260 100644
102713--- a/net/core/skbuff.c
102714+++ b/net/core/skbuff.c
102715@@ -360,18 +360,29 @@ refill:
102716 goto end;
102717 }
102718 nc->frag.size = PAGE_SIZE << order;
102719-recycle:
102720- atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
102721+ /* Even if we own the page, we do not use atomic_set().
102722+ * This would break get_page_unless_zero() users.
102723+ */
102724+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
102725+ &nc->frag.page->_count);
102726 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
102727 nc->frag.offset = 0;
102728 }
102729
102730 if (nc->frag.offset + fragsz > nc->frag.size) {
102731- /* avoid unnecessary locked operations if possible */
102732- if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
102733- atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
102734- goto recycle;
102735- goto refill;
102736+ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
102737+ if (!atomic_sub_and_test(nc->pagecnt_bias,
102738+ &nc->frag.page->_count))
102739+ goto refill;
102740+ /* OK, page count is 0, we can safely set it */
102741+ atomic_set(&nc->frag.page->_count,
102742+ NETDEV_PAGECNT_MAX_BIAS);
102743+ } else {
102744+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
102745+ &nc->frag.page->_count);
102746+ }
102747+ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
102748+ nc->frag.offset = 0;
102749 }
102750
102751 data = page_address(nc->frag.page) + nc->frag.offset;
102752@@ -2011,7 +2022,7 @@ EXPORT_SYMBOL(__skb_checksum);
102753 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102754 int len, __wsum csum)
102755 {
102756- const struct skb_checksum_ops ops = {
102757+ static const struct skb_checksum_ops ops = {
102758 .update = csum_partial_ext,
102759 .combine = csum_block_add_ext,
102760 };
102761@@ -3237,13 +3248,15 @@ void __init skb_init(void)
102762 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102763 sizeof(struct sk_buff),
102764 0,
102765- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102766+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102767+ SLAB_NO_SANITIZE,
102768 NULL);
102769 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102770 (2*sizeof(struct sk_buff)) +
102771 sizeof(atomic_t),
102772 0,
102773- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102774+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102775+ SLAB_NO_SANITIZE,
102776 NULL);
102777 }
102778
102779diff --git a/net/core/sock.c b/net/core/sock.c
102780index 9c3f823..bd8c884 100644
102781--- a/net/core/sock.c
102782+++ b/net/core/sock.c
102783@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102784 struct sk_buff_head *list = &sk->sk_receive_queue;
102785
102786 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102787- atomic_inc(&sk->sk_drops);
102788+ atomic_inc_unchecked(&sk->sk_drops);
102789 trace_sock_rcvqueue_full(sk, skb);
102790 return -ENOMEM;
102791 }
102792@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102793 return err;
102794
102795 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102796- atomic_inc(&sk->sk_drops);
102797+ atomic_inc_unchecked(&sk->sk_drops);
102798 return -ENOBUFS;
102799 }
102800
102801@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102802 skb_dst_force(skb);
102803
102804 spin_lock_irqsave(&list->lock, flags);
102805- skb->dropcount = atomic_read(&sk->sk_drops);
102806+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102807 __skb_queue_tail(list, skb);
102808 spin_unlock_irqrestore(&list->lock, flags);
102809
102810@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102811 skb->dev = NULL;
102812
102813 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102814- atomic_inc(&sk->sk_drops);
102815+ atomic_inc_unchecked(&sk->sk_drops);
102816 goto discard_and_relse;
102817 }
102818 if (nested)
102819@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102820 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102821 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102822 bh_unlock_sock(sk);
102823- atomic_inc(&sk->sk_drops);
102824+ atomic_inc_unchecked(&sk->sk_drops);
102825 goto discard_and_relse;
102826 }
102827
102828@@ -999,12 +999,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102829 struct timeval tm;
102830 } v;
102831
102832- int lv = sizeof(int);
102833- int len;
102834+ unsigned int lv = sizeof(int);
102835+ unsigned int len;
102836
102837 if (get_user(len, optlen))
102838 return -EFAULT;
102839- if (len < 0)
102840+ if (len > INT_MAX)
102841 return -EINVAL;
102842
102843 memset(&v, 0, sizeof(v));
102844@@ -1142,11 +1142,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102845
102846 case SO_PEERNAME:
102847 {
102848- char address[128];
102849+ char address[_K_SS_MAXSIZE];
102850
102851 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102852 return -ENOTCONN;
102853- if (lv < len)
102854+ if (lv < len || sizeof address < len)
102855 return -EINVAL;
102856 if (copy_to_user(optval, address, len))
102857 return -EFAULT;
102858@@ -1227,7 +1227,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102859
102860 if (len > lv)
102861 len = lv;
102862- if (copy_to_user(optval, &v, len))
102863+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102864 return -EFAULT;
102865 lenout:
102866 if (put_user(len, optlen))
102867@@ -1723,6 +1723,8 @@ EXPORT_SYMBOL(sock_kmalloc);
102868 */
102869 void sock_kfree_s(struct sock *sk, void *mem, int size)
102870 {
102871+ if (WARN_ON_ONCE(!mem))
102872+ return;
102873 kfree(mem);
102874 atomic_sub(size, &sk->sk_omem_alloc);
102875 }
102876@@ -2369,7 +2371,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102877 */
102878 smp_wmb();
102879 atomic_set(&sk->sk_refcnt, 1);
102880- atomic_set(&sk->sk_drops, 0);
102881+ atomic_set_unchecked(&sk->sk_drops, 0);
102882 }
102883 EXPORT_SYMBOL(sock_init_data);
102884
102885@@ -2497,6 +2499,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102886 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102887 int level, int type)
102888 {
102889+ struct sock_extended_err ee;
102890 struct sock_exterr_skb *serr;
102891 struct sk_buff *skb, *skb2;
102892 int copied, err;
102893@@ -2518,7 +2521,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102894 sock_recv_timestamp(msg, sk, skb);
102895
102896 serr = SKB_EXT_ERR(skb);
102897- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102898+ ee = serr->ee;
102899+ put_cmsg(msg, level, type, sizeof ee, &ee);
102900
102901 msg->msg_flags |= MSG_ERRQUEUE;
102902 err = copied;
102903diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102904index ad704c7..ca48aff 100644
102905--- a/net/core/sock_diag.c
102906+++ b/net/core/sock_diag.c
102907@@ -9,26 +9,33 @@
102908 #include <linux/inet_diag.h>
102909 #include <linux/sock_diag.h>
102910
102911-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102912+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102913 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102914 static DEFINE_MUTEX(sock_diag_table_mutex);
102915
102916 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102917 {
102918+#ifndef CONFIG_GRKERNSEC_HIDESYM
102919 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102920 cookie[1] != INET_DIAG_NOCOOKIE) &&
102921 ((u32)(unsigned long)sk != cookie[0] ||
102922 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102923 return -ESTALE;
102924 else
102925+#endif
102926 return 0;
102927 }
102928 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102929
102930 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102931 {
102932+#ifdef CONFIG_GRKERNSEC_HIDESYM
102933+ cookie[0] = 0;
102934+ cookie[1] = 0;
102935+#else
102936 cookie[0] = (u32)(unsigned long)sk;
102937 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102938+#endif
102939 }
102940 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102941
102942@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102943 mutex_lock(&sock_diag_table_mutex);
102944 if (sock_diag_handlers[hndl->family])
102945 err = -EBUSY;
102946- else
102947+ else {
102948+ pax_open_kernel();
102949 sock_diag_handlers[hndl->family] = hndl;
102950+ pax_close_kernel();
102951+ }
102952 mutex_unlock(&sock_diag_table_mutex);
102953
102954 return err;
102955@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102956
102957 mutex_lock(&sock_diag_table_mutex);
102958 BUG_ON(sock_diag_handlers[family] != hnld);
102959+ pax_open_kernel();
102960 sock_diag_handlers[family] = NULL;
102961+ pax_close_kernel();
102962 mutex_unlock(&sock_diag_table_mutex);
102963 }
102964 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102965diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102966index cf9cd13..50683950 100644
102967--- a/net/core/sysctl_net_core.c
102968+++ b/net/core/sysctl_net_core.c
102969@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102970 {
102971 unsigned int orig_size, size;
102972 int ret, i;
102973- struct ctl_table tmp = {
102974+ ctl_table_no_const tmp = {
102975 .data = &size,
102976 .maxlen = sizeof(size),
102977 .mode = table->mode
102978@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102979 void __user *buffer, size_t *lenp, loff_t *ppos)
102980 {
102981 char id[IFNAMSIZ];
102982- struct ctl_table tbl = {
102983+ ctl_table_no_const tbl = {
102984 .data = id,
102985 .maxlen = IFNAMSIZ,
102986 };
102987@@ -263,7 +263,7 @@ static struct ctl_table net_core_table[] = {
102988 .mode = 0644,
102989 .proc_handler = proc_dointvec
102990 },
102991-#ifdef CONFIG_BPF_JIT
102992+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102993 {
102994 .procname = "bpf_jit_enable",
102995 .data = &bpf_jit_enable,
102996@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
102997
102998 static __net_init int sysctl_core_net_init(struct net *net)
102999 {
103000- struct ctl_table *tbl;
103001+ ctl_table_no_const *tbl = NULL;
103002
103003 net->core.sysctl_somaxconn = SOMAXCONN;
103004
103005- tbl = netns_core_table;
103006 if (!net_eq(net, &init_net)) {
103007- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
103008+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
103009 if (tbl == NULL)
103010 goto err_dup;
103011
103012@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
103013 if (net->user_ns != &init_user_ns) {
103014 tbl[0].procname = NULL;
103015 }
103016- }
103017-
103018- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103019+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
103020+ } else
103021+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
103022 if (net->core.sysctl_hdr == NULL)
103023 goto err_reg;
103024
103025 return 0;
103026
103027 err_reg:
103028- if (tbl != netns_core_table)
103029- kfree(tbl);
103030+ kfree(tbl);
103031 err_dup:
103032 return -ENOMEM;
103033 }
103034@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
103035 kfree(tbl);
103036 }
103037
103038-static __net_initdata struct pernet_operations sysctl_core_ops = {
103039+static __net_initconst struct pernet_operations sysctl_core_ops = {
103040 .init = sysctl_core_net_init,
103041 .exit = sysctl_core_net_exit,
103042 };
103043diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
103044index ae011b4..d2d18bf 100644
103045--- a/net/decnet/af_decnet.c
103046+++ b/net/decnet/af_decnet.c
103047@@ -465,6 +465,7 @@ static struct proto dn_proto = {
103048 .sysctl_rmem = sysctl_decnet_rmem,
103049 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
103050 .obj_size = sizeof(struct dn_sock),
103051+ .slab_flags = SLAB_USERCOPY,
103052 };
103053
103054 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
103055diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
103056index 3b726f3..1af6368 100644
103057--- a/net/decnet/dn_dev.c
103058+++ b/net/decnet/dn_dev.c
103059@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
103060 .extra1 = &min_t3,
103061 .extra2 = &max_t3
103062 },
103063- {0}
103064+ { }
103065 },
103066 };
103067
103068diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
103069index 5325b54..a0d4d69 100644
103070--- a/net/decnet/sysctl_net_decnet.c
103071+++ b/net/decnet/sysctl_net_decnet.c
103072@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
103073
103074 if (len > *lenp) len = *lenp;
103075
103076- if (copy_to_user(buffer, addr, len))
103077+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
103078 return -EFAULT;
103079
103080 *lenp = len;
103081@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
103082
103083 if (len > *lenp) len = *lenp;
103084
103085- if (copy_to_user(buffer, devname, len))
103086+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
103087 return -EFAULT;
103088
103089 *lenp = len;
103090diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
103091index 32755cb..236d827 100644
103092--- a/net/ieee802154/reassembly.c
103093+++ b/net/ieee802154/reassembly.c
103094@@ -433,14 +433,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
103095
103096 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103097 {
103098- struct ctl_table *table;
103099+ ctl_table_no_const *table = NULL;
103100 struct ctl_table_header *hdr;
103101 struct netns_ieee802154_lowpan *ieee802154_lowpan =
103102 net_ieee802154_lowpan(net);
103103
103104- table = lowpan_frags_ns_ctl_table;
103105 if (!net_eq(net, &init_net)) {
103106- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
103107+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
103108 GFP_KERNEL);
103109 if (table == NULL)
103110 goto err_alloc;
103111@@ -455,9 +454,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103112 /* Don't export sysctls to unprivileged users */
103113 if (net->user_ns != &init_user_ns)
103114 table[0].procname = NULL;
103115- }
103116-
103117- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
103118+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
103119+ } else
103120+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
103121 if (hdr == NULL)
103122 goto err_reg;
103123
103124@@ -465,8 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
103125 return 0;
103126
103127 err_reg:
103128- if (!net_eq(net, &init_net))
103129- kfree(table);
103130+ kfree(table);
103131 err_alloc:
103132 return -ENOMEM;
103133 }
103134diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
103135index 214882e..f958b50 100644
103136--- a/net/ipv4/devinet.c
103137+++ b/net/ipv4/devinet.c
103138@@ -1548,7 +1548,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
103139 idx = 0;
103140 head = &net->dev_index_head[h];
103141 rcu_read_lock();
103142- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103143+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103144 net->dev_base_seq;
103145 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103146 if (idx < s_idx)
103147@@ -1866,7 +1866,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
103148 idx = 0;
103149 head = &net->dev_index_head[h];
103150 rcu_read_lock();
103151- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
103152+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
103153 net->dev_base_seq;
103154 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103155 if (idx < s_idx)
103156@@ -2101,7 +2101,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
103157 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
103158 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
103159
103160-static struct devinet_sysctl_table {
103161+static const struct devinet_sysctl_table {
103162 struct ctl_table_header *sysctl_header;
103163 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
103164 } devinet_sysctl = {
103165@@ -2233,7 +2233,7 @@ static __net_init int devinet_init_net(struct net *net)
103166 int err;
103167 struct ipv4_devconf *all, *dflt;
103168 #ifdef CONFIG_SYSCTL
103169- struct ctl_table *tbl = ctl_forward_entry;
103170+ ctl_table_no_const *tbl = NULL;
103171 struct ctl_table_header *forw_hdr;
103172 #endif
103173
103174@@ -2251,7 +2251,7 @@ static __net_init int devinet_init_net(struct net *net)
103175 goto err_alloc_dflt;
103176
103177 #ifdef CONFIG_SYSCTL
103178- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
103179+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
103180 if (tbl == NULL)
103181 goto err_alloc_ctl;
103182
103183@@ -2271,7 +2271,10 @@ static __net_init int devinet_init_net(struct net *net)
103184 goto err_reg_dflt;
103185
103186 err = -ENOMEM;
103187- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103188+ if (!net_eq(net, &init_net))
103189+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
103190+ else
103191+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
103192 if (forw_hdr == NULL)
103193 goto err_reg_ctl;
103194 net->ipv4.forw_hdr = forw_hdr;
103195@@ -2287,8 +2290,7 @@ err_reg_ctl:
103196 err_reg_dflt:
103197 __devinet_sysctl_unregister(all);
103198 err_reg_all:
103199- if (tbl != ctl_forward_entry)
103200- kfree(tbl);
103201+ kfree(tbl);
103202 err_alloc_ctl:
103203 #endif
103204 if (dflt != &ipv4_devconf_dflt)
103205diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
103206index 255aa99..45c78f8 100644
103207--- a/net/ipv4/fib_frontend.c
103208+++ b/net/ipv4/fib_frontend.c
103209@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
103210 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103211 fib_sync_up(dev);
103212 #endif
103213- atomic_inc(&net->ipv4.dev_addr_genid);
103214+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103215 rt_cache_flush(dev_net(dev));
103216 break;
103217 case NETDEV_DOWN:
103218 fib_del_ifaddr(ifa, NULL);
103219- atomic_inc(&net->ipv4.dev_addr_genid);
103220+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103221 if (ifa->ifa_dev->ifa_list == NULL) {
103222 /* Last address was deleted from this interface.
103223 * Disable IP.
103224@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
103225 #ifdef CONFIG_IP_ROUTE_MULTIPATH
103226 fib_sync_up(dev);
103227 #endif
103228- atomic_inc(&net->ipv4.dev_addr_genid);
103229+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
103230 rt_cache_flush(net);
103231 break;
103232 case NETDEV_DOWN:
103233diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
103234index b10cd43a..22327f9 100644
103235--- a/net/ipv4/fib_semantics.c
103236+++ b/net/ipv4/fib_semantics.c
103237@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
103238 nh->nh_saddr = inet_select_addr(nh->nh_dev,
103239 nh->nh_gw,
103240 nh->nh_parent->fib_scope);
103241- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
103242+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
103243
103244 return nh->nh_saddr;
103245 }
103246diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
103247index 6556263..db77807 100644
103248--- a/net/ipv4/gre_offload.c
103249+++ b/net/ipv4/gre_offload.c
103250@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
103251 if (csum)
103252 skb->encap_hdr_csum = 1;
103253
103254- if (unlikely(!pskb_may_pull(skb, ghl)))
103255- goto out;
103256-
103257 /* setup inner skb. */
103258 skb->protocol = greh->protocol;
103259 skb->encapsulation = 0;
103260
103261+ if (unlikely(!pskb_may_pull(skb, ghl)))
103262+ goto out;
103263+
103264 __skb_pull(skb, ghl);
103265 skb_reset_mac_header(skb);
103266 skb_set_network_header(skb, skb_inner_network_offset(skb));
103267diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
103268index 43116e8..ba0916a8 100644
103269--- a/net/ipv4/inet_hashtables.c
103270+++ b/net/ipv4/inet_hashtables.c
103271@@ -18,6 +18,7 @@
103272 #include <linux/sched.h>
103273 #include <linux/slab.h>
103274 #include <linux/wait.h>
103275+#include <linux/security.h>
103276
103277 #include <net/inet_connection_sock.h>
103278 #include <net/inet_hashtables.h>
103279@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
103280 return inet_ehashfn(net, laddr, lport, faddr, fport);
103281 }
103282
103283+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
103284+
103285 /*
103286 * Allocate and initialize a new local port bind bucket.
103287 * The bindhash mutex for snum's hash chain must be held here.
103288@@ -554,6 +557,8 @@ ok:
103289 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
103290 spin_unlock(&head->lock);
103291
103292+ gr_update_task_in_ip_table(inet_sk(sk));
103293+
103294 if (tw) {
103295 inet_twsk_deschedule(tw, death_row);
103296 while (twrefcnt) {
103297diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
103298index bd5f592..e80e605 100644
103299--- a/net/ipv4/inetpeer.c
103300+++ b/net/ipv4/inetpeer.c
103301@@ -482,7 +482,7 @@ relookup:
103302 if (p) {
103303 p->daddr = *daddr;
103304 atomic_set(&p->refcnt, 1);
103305- atomic_set(&p->rid, 0);
103306+ atomic_set_unchecked(&p->rid, 0);
103307 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
103308 p->rate_tokens = 0;
103309 /* 60*HZ is arbitrary, but chosen enough high so that the first
103310diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
103311index 15f0e2b..8cf8177 100644
103312--- a/net/ipv4/ip_fragment.c
103313+++ b/net/ipv4/ip_fragment.c
103314@@ -268,7 +268,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
103315 return 0;
103316
103317 start = qp->rid;
103318- end = atomic_inc_return(&peer->rid);
103319+ end = atomic_inc_return_unchecked(&peer->rid);
103320 qp->rid = end;
103321
103322 rc = qp->q.fragments && (end - start) > max;
103323@@ -746,12 +746,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
103324
103325 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103326 {
103327- struct ctl_table *table;
103328+ ctl_table_no_const *table = NULL;
103329 struct ctl_table_header *hdr;
103330
103331- table = ip4_frags_ns_ctl_table;
103332 if (!net_eq(net, &init_net)) {
103333- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103334+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
103335 if (table == NULL)
103336 goto err_alloc;
103337
103338@@ -765,9 +764,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103339 /* Don't export sysctls to unprivileged users */
103340 if (net->user_ns != &init_user_ns)
103341 table[0].procname = NULL;
103342- }
103343+ hdr = register_net_sysctl(net, "net/ipv4", table);
103344+ } else
103345+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
103346
103347- hdr = register_net_sysctl(net, "net/ipv4", table);
103348 if (hdr == NULL)
103349 goto err_reg;
103350
103351@@ -775,8 +775,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
103352 return 0;
103353
103354 err_reg:
103355- if (!net_eq(net, &init_net))
103356- kfree(table);
103357+ kfree(table);
103358 err_alloc:
103359 return -ENOMEM;
103360 }
103361diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
103362index 9b84254..c776611 100644
103363--- a/net/ipv4/ip_gre.c
103364+++ b/net/ipv4/ip_gre.c
103365@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
103366 module_param(log_ecn_error, bool, 0644);
103367 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103368
103369-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
103370+static struct rtnl_link_ops ipgre_link_ops;
103371 static int ipgre_tunnel_init(struct net_device *dev);
103372
103373 static int ipgre_net_id __read_mostly;
103374@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
103375 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
103376 };
103377
103378-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103379+static struct rtnl_link_ops ipgre_link_ops = {
103380 .kind = "gre",
103381 .maxtype = IFLA_GRE_MAX,
103382 .policy = ipgre_policy,
103383@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
103384 .fill_info = ipgre_fill_info,
103385 };
103386
103387-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
103388+static struct rtnl_link_ops ipgre_tap_ops = {
103389 .kind = "gretap",
103390 .maxtype = IFLA_GRE_MAX,
103391 .policy = ipgre_policy,
103392diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
103393index 3d4da2c..40f9c29 100644
103394--- a/net/ipv4/ip_input.c
103395+++ b/net/ipv4/ip_input.c
103396@@ -147,6 +147,10 @@
103397 #include <linux/mroute.h>
103398 #include <linux/netlink.h>
103399
103400+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103401+extern int grsec_enable_blackhole;
103402+#endif
103403+
103404 /*
103405 * Process Router Attention IP option (RFC 2113)
103406 */
103407@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
103408 if (!raw) {
103409 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
103410 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
103411+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103412+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103413+#endif
103414 icmp_send(skb, ICMP_DEST_UNREACH,
103415 ICMP_PROT_UNREACH, 0);
103416 }
103417diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
103418index 215af2b..73cbbe1 100644
103419--- a/net/ipv4/ip_output.c
103420+++ b/net/ipv4/ip_output.c
103421@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
103422 */
103423 features = netif_skb_features(skb);
103424 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
103425- if (IS_ERR(segs)) {
103426+ if (IS_ERR_OR_NULL(segs)) {
103427 kfree_skb(skb);
103428 return -ENOMEM;
103429 }
103430@@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103431 struct sk_buff *nskb;
103432 struct sock *sk;
103433 struct inet_sock *inet;
103434+ int err;
103435
103436 if (ip_options_echo(&replyopts.opt.opt, skb))
103437 return;
103438@@ -1572,8 +1573,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103439 sock_net_set(sk, net);
103440 __skb_queue_head_init(&sk->sk_write_queue);
103441 sk->sk_sndbuf = sysctl_wmem_default;
103442- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
103443- &ipc, &rt, MSG_DONTWAIT);
103444+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
103445+ len, 0, &ipc, &rt, MSG_DONTWAIT);
103446+ if (unlikely(err)) {
103447+ ip_flush_pending_frames(sk);
103448+ goto out;
103449+ }
103450+
103451 nskb = skb_peek(&sk->sk_write_queue);
103452 if (nskb) {
103453 if (arg->csumoffset >= 0)
103454@@ -1585,7 +1591,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
103455 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
103456 ip_push_pending_frames(sk, &fl4);
103457 }
103458-
103459+out:
103460 put_cpu_var(unicast_sock);
103461
103462 ip_rt_put(rt);
103463diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
103464index 5cb830c..81a7a56 100644
103465--- a/net/ipv4/ip_sockglue.c
103466+++ b/net/ipv4/ip_sockglue.c
103467@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103468 len = min_t(unsigned int, len, opt->optlen);
103469 if (put_user(len, optlen))
103470 return -EFAULT;
103471- if (copy_to_user(optval, opt->__data, len))
103472+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
103473+ copy_to_user(optval, opt->__data, len))
103474 return -EFAULT;
103475 return 0;
103476 }
103477@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
103478 if (sk->sk_type != SOCK_STREAM)
103479 return -ENOPROTOOPT;
103480
103481- msg.msg_control = (__force void *) optval;
103482+ msg.msg_control = (__force_kernel void *) optval;
103483 msg.msg_controllen = len;
103484 msg.msg_flags = flags;
103485
103486diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
103487index f4c987b..88c386c 100644
103488--- a/net/ipv4/ip_tunnel_core.c
103489+++ b/net/ipv4/ip_tunnel_core.c
103490@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
103491 skb_pull_rcsum(skb, hdr_len);
103492
103493 if (inner_proto == htons(ETH_P_TEB)) {
103494- struct ethhdr *eh = (struct ethhdr *)skb->data;
103495+ struct ethhdr *eh;
103496
103497 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
103498 return -ENOMEM;
103499
103500+ eh = (struct ethhdr *)skb->data;
103501 if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
103502 skb->protocol = eh->h_proto;
103503 else
103504diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
103505index e453cb7..3c8d952 100644
103506--- a/net/ipv4/ip_vti.c
103507+++ b/net/ipv4/ip_vti.c
103508@@ -45,7 +45,7 @@
103509 #include <net/net_namespace.h>
103510 #include <net/netns/generic.h>
103511
103512-static struct rtnl_link_ops vti_link_ops __read_mostly;
103513+static struct rtnl_link_ops vti_link_ops;
103514
103515 static int vti_net_id __read_mostly;
103516 static int vti_tunnel_init(struct net_device *dev);
103517@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
103518 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
103519 };
103520
103521-static struct rtnl_link_ops vti_link_ops __read_mostly = {
103522+static struct rtnl_link_ops vti_link_ops = {
103523 .kind = "vti",
103524 .maxtype = IFLA_VTI_MAX,
103525 .policy = vti_policy,
103526diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
103527index 5bbef4f..5bc4fb6 100644
103528--- a/net/ipv4/ipconfig.c
103529+++ b/net/ipv4/ipconfig.c
103530@@ -332,7 +332,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
103531
103532 mm_segment_t oldfs = get_fs();
103533 set_fs(get_ds());
103534- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103535+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103536 set_fs(oldfs);
103537 return res;
103538 }
103539@@ -343,7 +343,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
103540
103541 mm_segment_t oldfs = get_fs();
103542 set_fs(get_ds());
103543- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
103544+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
103545 set_fs(oldfs);
103546 return res;
103547 }
103548@@ -354,7 +354,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
103549
103550 mm_segment_t oldfs = get_fs();
103551 set_fs(get_ds());
103552- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
103553+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
103554 set_fs(oldfs);
103555 return res;
103556 }
103557diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
103558index 62eaa00..29b2dc2 100644
103559--- a/net/ipv4/ipip.c
103560+++ b/net/ipv4/ipip.c
103561@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103562 static int ipip_net_id __read_mostly;
103563
103564 static int ipip_tunnel_init(struct net_device *dev);
103565-static struct rtnl_link_ops ipip_link_ops __read_mostly;
103566+static struct rtnl_link_ops ipip_link_ops;
103567
103568 static int ipip_err(struct sk_buff *skb, u32 info)
103569 {
103570@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
103571 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
103572 };
103573
103574-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
103575+static struct rtnl_link_ops ipip_link_ops = {
103576 .kind = "ipip",
103577 .maxtype = IFLA_IPTUN_MAX,
103578 .policy = ipip_policy,
103579diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
103580index f95b6f9..2ee2097 100644
103581--- a/net/ipv4/netfilter/arp_tables.c
103582+++ b/net/ipv4/netfilter/arp_tables.c
103583@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
103584 #endif
103585
103586 static int get_info(struct net *net, void __user *user,
103587- const int *len, int compat)
103588+ int len, int compat)
103589 {
103590 char name[XT_TABLE_MAXNAMELEN];
103591 struct xt_table *t;
103592 int ret;
103593
103594- if (*len != sizeof(struct arpt_getinfo)) {
103595- duprintf("length %u != %Zu\n", *len,
103596+ if (len != sizeof(struct arpt_getinfo)) {
103597+ duprintf("length %u != %Zu\n", len,
103598 sizeof(struct arpt_getinfo));
103599 return -EINVAL;
103600 }
103601@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
103602 info.size = private->size;
103603 strcpy(info.name, name);
103604
103605- if (copy_to_user(user, &info, *len) != 0)
103606+ if (copy_to_user(user, &info, len) != 0)
103607 ret = -EFAULT;
103608 else
103609 ret = 0;
103610@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
103611
103612 switch (cmd) {
103613 case ARPT_SO_GET_INFO:
103614- ret = get_info(sock_net(sk), user, len, 1);
103615+ ret = get_info(sock_net(sk), user, *len, 1);
103616 break;
103617 case ARPT_SO_GET_ENTRIES:
103618 ret = compat_get_entries(sock_net(sk), user, len);
103619@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
103620
103621 switch (cmd) {
103622 case ARPT_SO_GET_INFO:
103623- ret = get_info(sock_net(sk), user, len, 0);
103624+ ret = get_info(sock_net(sk), user, *len, 0);
103625 break;
103626
103627 case ARPT_SO_GET_ENTRIES:
103628diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
103629index 99e810f..3711b81 100644
103630--- a/net/ipv4/netfilter/ip_tables.c
103631+++ b/net/ipv4/netfilter/ip_tables.c
103632@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
103633 #endif
103634
103635 static int get_info(struct net *net, void __user *user,
103636- const int *len, int compat)
103637+ int len, int compat)
103638 {
103639 char name[XT_TABLE_MAXNAMELEN];
103640 struct xt_table *t;
103641 int ret;
103642
103643- if (*len != sizeof(struct ipt_getinfo)) {
103644- duprintf("length %u != %zu\n", *len,
103645+ if (len != sizeof(struct ipt_getinfo)) {
103646+ duprintf("length %u != %zu\n", len,
103647 sizeof(struct ipt_getinfo));
103648 return -EINVAL;
103649 }
103650@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
103651 info.size = private->size;
103652 strcpy(info.name, name);
103653
103654- if (copy_to_user(user, &info, *len) != 0)
103655+ if (copy_to_user(user, &info, len) != 0)
103656 ret = -EFAULT;
103657 else
103658 ret = 0;
103659@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103660
103661 switch (cmd) {
103662 case IPT_SO_GET_INFO:
103663- ret = get_info(sock_net(sk), user, len, 1);
103664+ ret = get_info(sock_net(sk), user, *len, 1);
103665 break;
103666 case IPT_SO_GET_ENTRIES:
103667 ret = compat_get_entries(sock_net(sk), user, len);
103668@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103669
103670 switch (cmd) {
103671 case IPT_SO_GET_INFO:
103672- ret = get_info(sock_net(sk), user, len, 0);
103673+ ret = get_info(sock_net(sk), user, *len, 0);
103674 break;
103675
103676 case IPT_SO_GET_ENTRIES:
103677diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103678index 2510c02..cfb34fa 100644
103679--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
103680+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103681@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
103682 spin_lock_init(&cn->lock);
103683
103684 #ifdef CONFIG_PROC_FS
103685- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
103686+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
103687 if (!cn->procdir) {
103688 pr_err("Unable to proc dir entry\n");
103689 return -ENOMEM;
103690diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
103691index a3c59a0..ec620a50 100644
103692--- a/net/ipv4/ping.c
103693+++ b/net/ipv4/ping.c
103694@@ -59,7 +59,7 @@ struct ping_table {
103695 };
103696
103697 static struct ping_table ping_table;
103698-struct pingv6_ops pingv6_ops;
103699+struct pingv6_ops *pingv6_ops;
103700 EXPORT_SYMBOL_GPL(pingv6_ops);
103701
103702 static u16 ping_port_rover;
103703@@ -348,7 +348,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
103704 return -ENODEV;
103705 }
103706 }
103707- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
103708+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
103709 scoped);
103710 rcu_read_unlock();
103711
103712@@ -556,7 +556,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103713 }
103714 #if IS_ENABLED(CONFIG_IPV6)
103715 } else if (skb->protocol == htons(ETH_P_IPV6)) {
103716- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
103717+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
103718 #endif
103719 }
103720
103721@@ -574,7 +574,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103722 info, (u8 *)icmph);
103723 #if IS_ENABLED(CONFIG_IPV6)
103724 } else if (family == AF_INET6) {
103725- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
103726+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
103727 info, (u8 *)icmph);
103728 #endif
103729 }
103730@@ -858,7 +858,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103731 return ip_recv_error(sk, msg, len, addr_len);
103732 #if IS_ENABLED(CONFIG_IPV6)
103733 } else if (family == AF_INET6) {
103734- return pingv6_ops.ipv6_recv_error(sk, msg, len,
103735+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
103736 addr_len);
103737 #endif
103738 }
103739@@ -916,10 +916,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103740 }
103741
103742 if (inet6_sk(sk)->rxopt.all)
103743- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
103744+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
103745 if (skb->protocol == htons(ETH_P_IPV6) &&
103746 inet6_sk(sk)->rxopt.all)
103747- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
103748+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
103749 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
103750 ip_cmsg_recv(msg, skb);
103751 #endif
103752@@ -1111,7 +1111,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103753 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103754 0, sock_i_ino(sp),
103755 atomic_read(&sp->sk_refcnt), sp,
103756- atomic_read(&sp->sk_drops));
103757+ atomic_read_unchecked(&sp->sk_drops));
103758 }
103759
103760 static int ping_v4_seq_show(struct seq_file *seq, void *v)
103761diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103762index 739db31..74f0210 100644
103763--- a/net/ipv4/raw.c
103764+++ b/net/ipv4/raw.c
103765@@ -314,7 +314,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103766 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103767 {
103768 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103769- atomic_inc(&sk->sk_drops);
103770+ atomic_inc_unchecked(&sk->sk_drops);
103771 kfree_skb(skb);
103772 return NET_RX_DROP;
103773 }
103774@@ -755,16 +755,20 @@ static int raw_init(struct sock *sk)
103775
103776 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103777 {
103778+ struct icmp_filter filter;
103779+
103780 if (optlen > sizeof(struct icmp_filter))
103781 optlen = sizeof(struct icmp_filter);
103782- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103783+ if (copy_from_user(&filter, optval, optlen))
103784 return -EFAULT;
103785+ raw_sk(sk)->filter = filter;
103786 return 0;
103787 }
103788
103789 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103790 {
103791 int len, ret = -EFAULT;
103792+ struct icmp_filter filter;
103793
103794 if (get_user(len, optlen))
103795 goto out;
103796@@ -774,8 +778,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103797 if (len > sizeof(struct icmp_filter))
103798 len = sizeof(struct icmp_filter);
103799 ret = -EFAULT;
103800- if (put_user(len, optlen) ||
103801- copy_to_user(optval, &raw_sk(sk)->filter, len))
103802+ filter = raw_sk(sk)->filter;
103803+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103804 goto out;
103805 ret = 0;
103806 out: return ret;
103807@@ -1004,7 +1008,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103808 0, 0L, 0,
103809 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103810 0, sock_i_ino(sp),
103811- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103812+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103813 }
103814
103815 static int raw_seq_show(struct seq_file *seq, void *v)
103816diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103817index cbadb94..691f99e 100644
103818--- a/net/ipv4/route.c
103819+++ b/net/ipv4/route.c
103820@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103821
103822 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103823 {
103824- return seq_open(file, &rt_cache_seq_ops);
103825+ return seq_open_restrict(file, &rt_cache_seq_ops);
103826 }
103827
103828 static const struct file_operations rt_cache_seq_fops = {
103829@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103830
103831 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103832 {
103833- return seq_open(file, &rt_cpu_seq_ops);
103834+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103835 }
103836
103837 static const struct file_operations rt_cpu_seq_fops = {
103838@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103839
103840 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103841 {
103842- return single_open(file, rt_acct_proc_show, NULL);
103843+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103844 }
103845
103846 static const struct file_operations rt_acct_proc_fops = {
103847@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103848
103849 #define IP_IDENTS_SZ 2048u
103850 struct ip_ident_bucket {
103851- atomic_t id;
103852+ atomic_unchecked_t id;
103853 u32 stamp32;
103854 };
103855
103856-static struct ip_ident_bucket *ip_idents __read_mostly;
103857+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103858
103859 /* In order to protect privacy, we add a perturbation to identifiers
103860 * if one generator is seldom used. This makes hard for an attacker
103861@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103862 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103863 delta = prandom_u32_max(now - old);
103864
103865- return atomic_add_return(segs + delta, &bucket->id) - segs;
103866+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103867 }
103868 EXPORT_SYMBOL(ip_idents_reserve);
103869
103870@@ -2623,34 +2623,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103871 .maxlen = sizeof(int),
103872 .mode = 0200,
103873 .proc_handler = ipv4_sysctl_rtcache_flush,
103874+ .extra1 = &init_net,
103875 },
103876 { },
103877 };
103878
103879 static __net_init int sysctl_route_net_init(struct net *net)
103880 {
103881- struct ctl_table *tbl;
103882+ ctl_table_no_const *tbl = NULL;
103883
103884- tbl = ipv4_route_flush_table;
103885 if (!net_eq(net, &init_net)) {
103886- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103887+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103888 if (tbl == NULL)
103889 goto err_dup;
103890
103891 /* Don't export sysctls to unprivileged users */
103892 if (net->user_ns != &init_user_ns)
103893 tbl[0].procname = NULL;
103894- }
103895- tbl[0].extra1 = net;
103896+ tbl[0].extra1 = net;
103897+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103898+ } else
103899+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103900
103901- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103902 if (net->ipv4.route_hdr == NULL)
103903 goto err_reg;
103904 return 0;
103905
103906 err_reg:
103907- if (tbl != ipv4_route_flush_table)
103908- kfree(tbl);
103909+ kfree(tbl);
103910 err_dup:
103911 return -ENOMEM;
103912 }
103913@@ -2673,8 +2673,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103914
103915 static __net_init int rt_genid_init(struct net *net)
103916 {
103917- atomic_set(&net->ipv4.rt_genid, 0);
103918- atomic_set(&net->fnhe_genid, 0);
103919+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103920+ atomic_set_unchecked(&net->fnhe_genid, 0);
103921 get_random_bytes(&net->ipv4.dev_addr_genid,
103922 sizeof(net->ipv4.dev_addr_genid));
103923 return 0;
103924@@ -2717,11 +2717,7 @@ int __init ip_rt_init(void)
103925 {
103926 int rc = 0;
103927
103928- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103929- if (!ip_idents)
103930- panic("IP: failed to allocate ip_idents\n");
103931-
103932- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103933+ prandom_bytes(ip_idents, sizeof(ip_idents));
103934
103935 #ifdef CONFIG_IP_ROUTE_CLASSID
103936 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
103937diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103938index 79a007c..5023029 100644
103939--- a/net/ipv4/sysctl_net_ipv4.c
103940+++ b/net/ipv4/sysctl_net_ipv4.c
103941@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103942 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103943 int ret;
103944 int range[2];
103945- struct ctl_table tmp = {
103946+ ctl_table_no_const tmp = {
103947 .data = &range,
103948 .maxlen = sizeof(range),
103949 .mode = table->mode,
103950@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103951 int ret;
103952 gid_t urange[2];
103953 kgid_t low, high;
103954- struct ctl_table tmp = {
103955+ ctl_table_no_const tmp = {
103956 .data = &urange,
103957 .maxlen = sizeof(urange),
103958 .mode = table->mode,
103959@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103960 void __user *buffer, size_t *lenp, loff_t *ppos)
103961 {
103962 char val[TCP_CA_NAME_MAX];
103963- struct ctl_table tbl = {
103964+ ctl_table_no_const tbl = {
103965 .data = val,
103966 .maxlen = TCP_CA_NAME_MAX,
103967 };
103968@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103969 void __user *buffer, size_t *lenp,
103970 loff_t *ppos)
103971 {
103972- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103973+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103974 int ret;
103975
103976 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103977@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103978 void __user *buffer, size_t *lenp,
103979 loff_t *ppos)
103980 {
103981- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103982+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103983 int ret;
103984
103985 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103986@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103987 void __user *buffer, size_t *lenp,
103988 loff_t *ppos)
103989 {
103990- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103991+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103992 struct tcp_fastopen_context *ctxt;
103993 int ret;
103994 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103995@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
103996
103997 static __net_init int ipv4_sysctl_init_net(struct net *net)
103998 {
103999- struct ctl_table *table;
104000+ ctl_table_no_const *table = NULL;
104001
104002- table = ipv4_net_table;
104003 if (!net_eq(net, &init_net)) {
104004 int i;
104005
104006- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
104007+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
104008 if (table == NULL)
104009 goto err_alloc;
104010
104011@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
104012 table[i].data += (void *)net - (void *)&init_net;
104013 }
104014
104015- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104016+ if (!net_eq(net, &init_net))
104017+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
104018+ else
104019+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
104020 if (net->ipv4.ipv4_hdr == NULL)
104021 goto err_reg;
104022
104023diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
104024index a906e02..f3b6a0f 100644
104025--- a/net/ipv4/tcp_input.c
104026+++ b/net/ipv4/tcp_input.c
104027@@ -755,7 +755,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
104028 * without any lock. We want to make sure compiler wont store
104029 * intermediate values in this location.
104030 */
104031- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
104032+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
104033 sk->sk_max_pacing_rate);
104034 }
104035
104036@@ -4488,7 +4488,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
104037 * simplifies code)
104038 */
104039 static void
104040-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
104041+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
104042 struct sk_buff *head, struct sk_buff *tail,
104043 u32 start, u32 end)
104044 {
104045@@ -5546,6 +5546,7 @@ discard:
104046 tcp_paws_reject(&tp->rx_opt, 0))
104047 goto discard_and_undo;
104048
104049+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
104050 if (th->syn) {
104051 /* We see SYN without ACK. It is attempt of
104052 * simultaneous connect with crossed SYNs.
104053@@ -5596,6 +5597,7 @@ discard:
104054 goto discard;
104055 #endif
104056 }
104057+#endif
104058 /* "fifth, if neither of the SYN or RST bits is set then
104059 * drop the segment and return."
104060 */
104061@@ -5642,7 +5644,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
104062 goto discard;
104063
104064 if (th->syn) {
104065- if (th->fin)
104066+ if (th->fin || th->urg || th->psh)
104067 goto discard;
104068 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
104069 return 1;
104070diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
104071index cd17f00..1e1f252 100644
104072--- a/net/ipv4/tcp_ipv4.c
104073+++ b/net/ipv4/tcp_ipv4.c
104074@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
104075 EXPORT_SYMBOL(sysctl_tcp_low_latency);
104076
104077
104078+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104079+extern int grsec_enable_blackhole;
104080+#endif
104081+
104082 #ifdef CONFIG_TCP_MD5SIG
104083 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
104084 __be32 daddr, __be32 saddr, const struct tcphdr *th);
104085@@ -1487,6 +1491,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
104086 return 0;
104087
104088 reset:
104089+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104090+ if (!grsec_enable_blackhole)
104091+#endif
104092 tcp_v4_send_reset(rsk, skb);
104093 discard:
104094 kfree_skb(skb);
104095@@ -1633,12 +1640,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
104096 TCP_SKB_CB(skb)->sacked = 0;
104097
104098 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104099- if (!sk)
104100+ if (!sk) {
104101+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104102+ ret = 1;
104103+#endif
104104 goto no_tcp_socket;
104105-
104106+ }
104107 process:
104108- if (sk->sk_state == TCP_TIME_WAIT)
104109+ if (sk->sk_state == TCP_TIME_WAIT) {
104110+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104111+ ret = 2;
104112+#endif
104113 goto do_time_wait;
104114+ }
104115
104116 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
104117 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104118@@ -1704,6 +1718,10 @@ csum_error:
104119 bad_packet:
104120 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104121 } else {
104122+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104123+ if (!grsec_enable_blackhole || (ret == 1 &&
104124+ (skb->dev->flags & IFF_LOOPBACK)))
104125+#endif
104126 tcp_v4_send_reset(NULL, skb);
104127 }
104128
104129diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
104130index 1649988..6251843 100644
104131--- a/net/ipv4/tcp_minisocks.c
104132+++ b/net/ipv4/tcp_minisocks.c
104133@@ -27,6 +27,10 @@
104134 #include <net/inet_common.h>
104135 #include <net/xfrm.h>
104136
104137+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104138+extern int grsec_enable_blackhole;
104139+#endif
104140+
104141 int sysctl_tcp_syncookies __read_mostly = 1;
104142 EXPORT_SYMBOL(sysctl_tcp_syncookies);
104143
104144@@ -740,7 +744,10 @@ embryonic_reset:
104145 * avoid becoming vulnerable to outside attack aiming at
104146 * resetting legit local connections.
104147 */
104148- req->rsk_ops->send_reset(sk, skb);
104149+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104150+ if (!grsec_enable_blackhole)
104151+#endif
104152+ req->rsk_ops->send_reset(sk, skb);
104153 } else if (fastopen) { /* received a valid RST pkt */
104154 reqsk_fastopen_remove(sk, req, true);
104155 tcp_reset(sk);
104156diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
104157index 3b66610..bfbe23a 100644
104158--- a/net/ipv4/tcp_probe.c
104159+++ b/net/ipv4/tcp_probe.c
104160@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
104161 if (cnt + width >= len)
104162 break;
104163
104164- if (copy_to_user(buf + cnt, tbuf, width))
104165+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
104166 return -EFAULT;
104167 cnt += width;
104168 }
104169diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
104170index df90cd1..9ab2c9b 100644
104171--- a/net/ipv4/tcp_timer.c
104172+++ b/net/ipv4/tcp_timer.c
104173@@ -22,6 +22,10 @@
104174 #include <linux/gfp.h>
104175 #include <net/tcp.h>
104176
104177+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104178+extern int grsec_lastack_retries;
104179+#endif
104180+
104181 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
104182 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
104183 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
104184@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
104185 }
104186 }
104187
104188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104189+ if ((sk->sk_state == TCP_LAST_ACK) &&
104190+ (grsec_lastack_retries > 0) &&
104191+ (grsec_lastack_retries < retry_until))
104192+ retry_until = grsec_lastack_retries;
104193+#endif
104194+
104195 if (retransmits_timed_out(sk, retry_until,
104196 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
104197 /* Has it gone just too far? */
104198diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
104199index f57c0e4..cf24bd0 100644
104200--- a/net/ipv4/udp.c
104201+++ b/net/ipv4/udp.c
104202@@ -87,6 +87,7 @@
104203 #include <linux/types.h>
104204 #include <linux/fcntl.h>
104205 #include <linux/module.h>
104206+#include <linux/security.h>
104207 #include <linux/socket.h>
104208 #include <linux/sockios.h>
104209 #include <linux/igmp.h>
104210@@ -113,6 +114,10 @@
104211 #include <net/busy_poll.h>
104212 #include "udp_impl.h"
104213
104214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104215+extern int grsec_enable_blackhole;
104216+#endif
104217+
104218 struct udp_table udp_table __read_mostly;
104219 EXPORT_SYMBOL(udp_table);
104220
104221@@ -594,6 +599,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
104222 return true;
104223 }
104224
104225+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
104226+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
104227+
104228 /*
104229 * This routine is called by the ICMP module when it gets some
104230 * sort of error condition. If err < 0 then the socket should
104231@@ -931,9 +939,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
104232 dport = usin->sin_port;
104233 if (dport == 0)
104234 return -EINVAL;
104235+
104236+ err = gr_search_udp_sendmsg(sk, usin);
104237+ if (err)
104238+ return err;
104239 } else {
104240 if (sk->sk_state != TCP_ESTABLISHED)
104241 return -EDESTADDRREQ;
104242+
104243+ err = gr_search_udp_sendmsg(sk, NULL);
104244+ if (err)
104245+ return err;
104246+
104247 daddr = inet->inet_daddr;
104248 dport = inet->inet_dport;
104249 /* Open fast path for connected socket.
104250@@ -1181,7 +1198,7 @@ static unsigned int first_packet_length(struct sock *sk)
104251 IS_UDPLITE(sk));
104252 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104253 IS_UDPLITE(sk));
104254- atomic_inc(&sk->sk_drops);
104255+ atomic_inc_unchecked(&sk->sk_drops);
104256 __skb_unlink(skb, rcvq);
104257 __skb_queue_tail(&list_kill, skb);
104258 }
104259@@ -1261,6 +1278,10 @@ try_again:
104260 if (!skb)
104261 goto out;
104262
104263+ err = gr_search_udp_recvmsg(sk, skb);
104264+ if (err)
104265+ goto out_free;
104266+
104267 ulen = skb->len - sizeof(struct udphdr);
104268 copied = len;
104269 if (copied > ulen)
104270@@ -1294,7 +1315,7 @@ try_again:
104271 if (unlikely(err)) {
104272 trace_kfree_skb(skb, udp_recvmsg);
104273 if (!peeked) {
104274- atomic_inc(&sk->sk_drops);
104275+ atomic_inc_unchecked(&sk->sk_drops);
104276 UDP_INC_STATS_USER(sock_net(sk),
104277 UDP_MIB_INERRORS, is_udplite);
104278 }
104279@@ -1591,7 +1612,7 @@ csum_error:
104280 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104281 drop:
104282 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104283- atomic_inc(&sk->sk_drops);
104284+ atomic_inc_unchecked(&sk->sk_drops);
104285 kfree_skb(skb);
104286 return -1;
104287 }
104288@@ -1610,7 +1631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104289 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104290
104291 if (!skb1) {
104292- atomic_inc(&sk->sk_drops);
104293+ atomic_inc_unchecked(&sk->sk_drops);
104294 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104295 IS_UDPLITE(sk));
104296 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104297@@ -1807,6 +1828,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104298 goto csum_error;
104299
104300 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104301+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104302+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104303+#endif
104304 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
104305
104306 /*
104307@@ -2393,7 +2417,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
104308 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
104309 0, sock_i_ino(sp),
104310 atomic_read(&sp->sk_refcnt), sp,
104311- atomic_read(&sp->sk_drops));
104312+ atomic_read_unchecked(&sp->sk_drops));
104313 }
104314
104315 int udp4_seq_show(struct seq_file *seq, void *v)
104316diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
104317index 6156f68..d6ab46d 100644
104318--- a/net/ipv4/xfrm4_policy.c
104319+++ b/net/ipv4/xfrm4_policy.c
104320@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104321 fl4->flowi4_tos = iph->tos;
104322 }
104323
104324-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
104325+static int xfrm4_garbage_collect(struct dst_ops *ops)
104326 {
104327 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
104328
104329- xfrm4_policy_afinfo.garbage_collect(net);
104330+ xfrm_garbage_collect_deferred(net);
104331 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
104332 }
104333
104334@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
104335
104336 static int __net_init xfrm4_net_init(struct net *net)
104337 {
104338- struct ctl_table *table;
104339+ ctl_table_no_const *table = NULL;
104340 struct ctl_table_header *hdr;
104341
104342- table = xfrm4_policy_table;
104343 if (!net_eq(net, &init_net)) {
104344- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104345+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
104346 if (!table)
104347 goto err_alloc;
104348
104349 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
104350- }
104351-
104352- hdr = register_net_sysctl(net, "net/ipv4", table);
104353+ hdr = register_net_sysctl(net, "net/ipv4", table);
104354+ } else
104355+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
104356 if (!hdr)
104357 goto err_reg;
104358
104359@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
104360 return 0;
104361
104362 err_reg:
104363- if (!net_eq(net, &init_net))
104364- kfree(table);
104365+ kfree(table);
104366 err_alloc:
104367 return -ENOMEM;
104368 }
104369diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
104370index 3e118df..27b16cf 100644
104371--- a/net/ipv6/addrconf.c
104372+++ b/net/ipv6/addrconf.c
104373@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
104374 idx = 0;
104375 head = &net->dev_index_head[h];
104376 rcu_read_lock();
104377- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
104378+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
104379 net->dev_base_seq;
104380 hlist_for_each_entry_rcu(dev, head, index_hlist) {
104381 if (idx < s_idx)
104382@@ -2396,7 +2396,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
104383 p.iph.ihl = 5;
104384 p.iph.protocol = IPPROTO_IPV6;
104385 p.iph.ttl = 64;
104386- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
104387+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
104388
104389 if (ops->ndo_do_ioctl) {
104390 mm_segment_t oldfs = get_fs();
104391@@ -3531,16 +3531,23 @@ static const struct file_operations if6_fops = {
104392 .release = seq_release_net,
104393 };
104394
104395+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
104396+extern void unregister_ipv6_seq_ops_addr(void);
104397+
104398 static int __net_init if6_proc_net_init(struct net *net)
104399 {
104400- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
104401+ register_ipv6_seq_ops_addr(&if6_seq_ops);
104402+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
104403+ unregister_ipv6_seq_ops_addr();
104404 return -ENOMEM;
104405+ }
104406 return 0;
104407 }
104408
104409 static void __net_exit if6_proc_net_exit(struct net *net)
104410 {
104411 remove_proc_entry("if_inet6", net->proc_net);
104412+ unregister_ipv6_seq_ops_addr();
104413 }
104414
104415 static struct pernet_operations if6_proc_net_ops = {
104416@@ -4156,7 +4163,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
104417 s_ip_idx = ip_idx = cb->args[2];
104418
104419 rcu_read_lock();
104420- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104421+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
104422 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
104423 idx = 0;
104424 head = &net->dev_index_head[h];
104425@@ -4784,7 +4791,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104426 rt_genid_bump_ipv6(net);
104427 break;
104428 }
104429- atomic_inc(&net->ipv6.dev_addr_genid);
104430+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
104431 }
104432
104433 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
104434@@ -4804,7 +4811,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
104435 int *valp = ctl->data;
104436 int val = *valp;
104437 loff_t pos = *ppos;
104438- struct ctl_table lctl;
104439+ ctl_table_no_const lctl;
104440 int ret;
104441
104442 /*
104443@@ -4889,7 +4896,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
104444 int *valp = ctl->data;
104445 int val = *valp;
104446 loff_t pos = *ppos;
104447- struct ctl_table lctl;
104448+ ctl_table_no_const lctl;
104449 int ret;
104450
104451 /*
104452diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
104453index 2daa3a1..341066c 100644
104454--- a/net/ipv6/af_inet6.c
104455+++ b/net/ipv6/af_inet6.c
104456@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
104457 net->ipv6.sysctl.icmpv6_time = 1*HZ;
104458 net->ipv6.sysctl.flowlabel_consistency = 1;
104459 net->ipv6.sysctl.auto_flowlabels = 0;
104460- atomic_set(&net->ipv6.rt_genid, 0);
104461+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
104462
104463 err = ipv6_init_mibs(net);
104464 if (err)
104465diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
104466index 2753319..b7e625c 100644
104467--- a/net/ipv6/datagram.c
104468+++ b/net/ipv6/datagram.c
104469@@ -939,5 +939,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
104470 0,
104471 sock_i_ino(sp),
104472 atomic_read(&sp->sk_refcnt), sp,
104473- atomic_read(&sp->sk_drops));
104474+ atomic_read_unchecked(&sp->sk_drops));
104475 }
104476diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
104477index 06ba3e5..5c08d38 100644
104478--- a/net/ipv6/icmp.c
104479+++ b/net/ipv6/icmp.c
104480@@ -993,7 +993,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
104481
104482 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
104483 {
104484- struct ctl_table *table;
104485+ ctl_table_no_const *table;
104486
104487 table = kmemdup(ipv6_icmp_table_template,
104488 sizeof(ipv6_icmp_table_template),
104489diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
104490index 97299d7..c8e6894 100644
104491--- a/net/ipv6/ip6_gre.c
104492+++ b/net/ipv6/ip6_gre.c
104493@@ -71,8 +71,8 @@ struct ip6gre_net {
104494 struct net_device *fb_tunnel_dev;
104495 };
104496
104497-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
104498-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
104499+static struct rtnl_link_ops ip6gre_link_ops;
104500+static struct rtnl_link_ops ip6gre_tap_ops;
104501 static int ip6gre_tunnel_init(struct net_device *dev);
104502 static void ip6gre_tunnel_setup(struct net_device *dev);
104503 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
104504@@ -1286,7 +1286,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
104505 }
104506
104507
104508-static struct inet6_protocol ip6gre_protocol __read_mostly = {
104509+static struct inet6_protocol ip6gre_protocol = {
104510 .handler = ip6gre_rcv,
104511 .err_handler = ip6gre_err,
104512 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
104513@@ -1645,7 +1645,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
104514 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
104515 };
104516
104517-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104518+static struct rtnl_link_ops ip6gre_link_ops = {
104519 .kind = "ip6gre",
104520 .maxtype = IFLA_GRE_MAX,
104521 .policy = ip6gre_policy,
104522@@ -1659,7 +1659,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104523 .fill_info = ip6gre_fill_info,
104524 };
104525
104526-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
104527+static struct rtnl_link_ops ip6gre_tap_ops = {
104528 .kind = "ip6gretap",
104529 .maxtype = IFLA_GRE_MAX,
104530 .policy = ip6gre_policy,
104531diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
104532index 65eda2a..620a102 100644
104533--- a/net/ipv6/ip6_offload.c
104534+++ b/net/ipv6/ip6_offload.c
104535@@ -46,6 +46,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
104536 if (unlikely(!pskb_may_pull(skb, len)))
104537 break;
104538
104539+ opth = (void *)skb->data;
104540 proto = opth->nexthdr;
104541 __skb_pull(skb, len);
104542 }
104543diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
104544index 69a84b4..881c319 100644
104545--- a/net/ipv6/ip6_tunnel.c
104546+++ b/net/ipv6/ip6_tunnel.c
104547@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104548
104549 static int ip6_tnl_dev_init(struct net_device *dev);
104550 static void ip6_tnl_dev_setup(struct net_device *dev);
104551-static struct rtnl_link_ops ip6_link_ops __read_mostly;
104552+static struct rtnl_link_ops ip6_link_ops;
104553
104554 static int ip6_tnl_net_id __read_mostly;
104555 struct ip6_tnl_net {
104556@@ -1714,7 +1714,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
104557 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
104558 };
104559
104560-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
104561+static struct rtnl_link_ops ip6_link_ops = {
104562 .kind = "ip6tnl",
104563 .maxtype = IFLA_IPTUN_MAX,
104564 .policy = ip6_tnl_policy,
104565diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
104566index 5833a22..6631377 100644
104567--- a/net/ipv6/ip6_vti.c
104568+++ b/net/ipv6/ip6_vti.c
104569@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104570
104571 static int vti6_dev_init(struct net_device *dev);
104572 static void vti6_dev_setup(struct net_device *dev);
104573-static struct rtnl_link_ops vti6_link_ops __read_mostly;
104574+static struct rtnl_link_ops vti6_link_ops;
104575
104576 static int vti6_net_id __read_mostly;
104577 struct vti6_net {
104578@@ -981,7 +981,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
104579 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
104580 };
104581
104582-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
104583+static struct rtnl_link_ops vti6_link_ops = {
104584 .kind = "vti6",
104585 .maxtype = IFLA_VTI_MAX,
104586 .policy = vti6_policy,
104587diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
104588index 0c28998..d0a2ecd 100644
104589--- a/net/ipv6/ipv6_sockglue.c
104590+++ b/net/ipv6/ipv6_sockglue.c
104591@@ -995,7 +995,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
104592 if (sk->sk_type != SOCK_STREAM)
104593 return -ENOPROTOOPT;
104594
104595- msg.msg_control = optval;
104596+ msg.msg_control = (void __force_kernel *)optval;
104597 msg.msg_controllen = len;
104598 msg.msg_flags = flags;
104599
104600diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
104601index e080fbb..412b3cf 100644
104602--- a/net/ipv6/netfilter/ip6_tables.c
104603+++ b/net/ipv6/netfilter/ip6_tables.c
104604@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
104605 #endif
104606
104607 static int get_info(struct net *net, void __user *user,
104608- const int *len, int compat)
104609+ int len, int compat)
104610 {
104611 char name[XT_TABLE_MAXNAMELEN];
104612 struct xt_table *t;
104613 int ret;
104614
104615- if (*len != sizeof(struct ip6t_getinfo)) {
104616- duprintf("length %u != %zu\n", *len,
104617+ if (len != sizeof(struct ip6t_getinfo)) {
104618+ duprintf("length %u != %zu\n", len,
104619 sizeof(struct ip6t_getinfo));
104620 return -EINVAL;
104621 }
104622@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
104623 info.size = private->size;
104624 strcpy(info.name, name);
104625
104626- if (copy_to_user(user, &info, *len) != 0)
104627+ if (copy_to_user(user, &info, len) != 0)
104628 ret = -EFAULT;
104629 else
104630 ret = 0;
104631@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104632
104633 switch (cmd) {
104634 case IP6T_SO_GET_INFO:
104635- ret = get_info(sock_net(sk), user, len, 1);
104636+ ret = get_info(sock_net(sk), user, *len, 1);
104637 break;
104638 case IP6T_SO_GET_ENTRIES:
104639 ret = compat_get_entries(sock_net(sk), user, len);
104640@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104641
104642 switch (cmd) {
104643 case IP6T_SO_GET_INFO:
104644- ret = get_info(sock_net(sk), user, len, 0);
104645+ ret = get_info(sock_net(sk), user, *len, 0);
104646 break;
104647
104648 case IP6T_SO_GET_ENTRIES:
104649diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
104650index 6f187c8..34b367f 100644
104651--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
104652+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
104653@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
104654
104655 static int nf_ct_frag6_sysctl_register(struct net *net)
104656 {
104657- struct ctl_table *table;
104658+ ctl_table_no_const *table = NULL;
104659 struct ctl_table_header *hdr;
104660
104661- table = nf_ct_frag6_sysctl_table;
104662 if (!net_eq(net, &init_net)) {
104663- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
104664+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
104665 GFP_KERNEL);
104666 if (table == NULL)
104667 goto err_alloc;
104668@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104669 table[2].data = &net->nf_frag.frags.high_thresh;
104670 table[2].extra1 = &net->nf_frag.frags.low_thresh;
104671 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
104672- }
104673-
104674- hdr = register_net_sysctl(net, "net/netfilter", table);
104675+ hdr = register_net_sysctl(net, "net/netfilter", table);
104676+ } else
104677+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
104678 if (hdr == NULL)
104679 goto err_reg;
104680
104681@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104682 return 0;
104683
104684 err_reg:
104685- if (!net_eq(net, &init_net))
104686- kfree(table);
104687+ kfree(table);
104688 err_alloc:
104689 return -ENOMEM;
104690 }
104691diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
104692index 5b7a1ed..d9da205 100644
104693--- a/net/ipv6/ping.c
104694+++ b/net/ipv6/ping.c
104695@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
104696 };
104697 #endif
104698
104699+static struct pingv6_ops real_pingv6_ops = {
104700+ .ipv6_recv_error = ipv6_recv_error,
104701+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
104702+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
104703+ .icmpv6_err_convert = icmpv6_err_convert,
104704+ .ipv6_icmp_error = ipv6_icmp_error,
104705+ .ipv6_chk_addr = ipv6_chk_addr,
104706+};
104707+
104708+static struct pingv6_ops dummy_pingv6_ops = {
104709+ .ipv6_recv_error = dummy_ipv6_recv_error,
104710+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
104711+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
104712+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
104713+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
104714+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
104715+};
104716+
104717 int __init pingv6_init(void)
104718 {
104719 #ifdef CONFIG_PROC_FS
104720@@ -247,13 +265,7 @@ int __init pingv6_init(void)
104721 if (ret)
104722 return ret;
104723 #endif
104724- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104725- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104726- pingv6_ops.ip6_datagram_recv_specific_ctl =
104727- ip6_datagram_recv_specific_ctl;
104728- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104729- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104730- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104731+ pingv6_ops = &real_pingv6_ops;
104732 return inet6_register_protosw(&pingv6_protosw);
104733 }
104734
104735@@ -262,14 +274,9 @@ int __init pingv6_init(void)
104736 */
104737 void pingv6_exit(void)
104738 {
104739- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104740- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104741- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104742- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104743- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104744- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104745 #ifdef CONFIG_PROC_FS
104746 unregister_pernet_subsys(&ping_v6_net_ops);
104747 #endif
104748+ pingv6_ops = &dummy_pingv6_ops;
104749 inet6_unregister_protosw(&pingv6_protosw);
104750 }
104751diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104752index 2d6f860..b0165f5 100644
104753--- a/net/ipv6/proc.c
104754+++ b/net/ipv6/proc.c
104755@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104756 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104757 goto proc_snmp6_fail;
104758
104759- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104760+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104761 if (!net->mib.proc_net_devsnmp6)
104762 goto proc_dev_snmp6_fail;
104763 return 0;
104764diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104765index 39d4422..b0979547 100644
104766--- a/net/ipv6/raw.c
104767+++ b/net/ipv6/raw.c
104768@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104769 {
104770 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104771 skb_checksum_complete(skb)) {
104772- atomic_inc(&sk->sk_drops);
104773+ atomic_inc_unchecked(&sk->sk_drops);
104774 kfree_skb(skb);
104775 return NET_RX_DROP;
104776 }
104777@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104778 struct raw6_sock *rp = raw6_sk(sk);
104779
104780 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104781- atomic_inc(&sk->sk_drops);
104782+ atomic_inc_unchecked(&sk->sk_drops);
104783 kfree_skb(skb);
104784 return NET_RX_DROP;
104785 }
104786@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104787
104788 if (inet->hdrincl) {
104789 if (skb_checksum_complete(skb)) {
104790- atomic_inc(&sk->sk_drops);
104791+ atomic_inc_unchecked(&sk->sk_drops);
104792 kfree_skb(skb);
104793 return NET_RX_DROP;
104794 }
104795@@ -608,7 +608,7 @@ out:
104796 return err;
104797 }
104798
104799-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
104800+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
104801 struct flowi6 *fl6, struct dst_entry **dstp,
104802 unsigned int flags)
104803 {
104804@@ -914,12 +914,15 @@ do_confirm:
104805 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104806 char __user *optval, int optlen)
104807 {
104808+ struct icmp6_filter filter;
104809+
104810 switch (optname) {
104811 case ICMPV6_FILTER:
104812 if (optlen > sizeof(struct icmp6_filter))
104813 optlen = sizeof(struct icmp6_filter);
104814- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104815+ if (copy_from_user(&filter, optval, optlen))
104816 return -EFAULT;
104817+ raw6_sk(sk)->filter = filter;
104818 return 0;
104819 default:
104820 return -ENOPROTOOPT;
104821@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104822 char __user *optval, int __user *optlen)
104823 {
104824 int len;
104825+ struct icmp6_filter filter;
104826
104827 switch (optname) {
104828 case ICMPV6_FILTER:
104829@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104830 len = sizeof(struct icmp6_filter);
104831 if (put_user(len, optlen))
104832 return -EFAULT;
104833- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104834+ filter = raw6_sk(sk)->filter;
104835+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104836 return -EFAULT;
104837 return 0;
104838 default:
104839diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104840index c6557d9..173e728 100644
104841--- a/net/ipv6/reassembly.c
104842+++ b/net/ipv6/reassembly.c
104843@@ -627,12 +627,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104844
104845 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104846 {
104847- struct ctl_table *table;
104848+ ctl_table_no_const *table = NULL;
104849 struct ctl_table_header *hdr;
104850
104851- table = ip6_frags_ns_ctl_table;
104852 if (!net_eq(net, &init_net)) {
104853- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104854+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104855 if (table == NULL)
104856 goto err_alloc;
104857
104858@@ -646,9 +645,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104859 /* Don't export sysctls to unprivileged users */
104860 if (net->user_ns != &init_user_ns)
104861 table[0].procname = NULL;
104862- }
104863+ hdr = register_net_sysctl(net, "net/ipv6", table);
104864+ } else
104865+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104866
104867- hdr = register_net_sysctl(net, "net/ipv6", table);
104868 if (hdr == NULL)
104869 goto err_reg;
104870
104871@@ -656,8 +656,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104872 return 0;
104873
104874 err_reg:
104875- if (!net_eq(net, &init_net))
104876- kfree(table);
104877+ kfree(table);
104878 err_alloc:
104879 return -ENOMEM;
104880 }
104881diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104882index bafde82..af2c91f 100644
104883--- a/net/ipv6/route.c
104884+++ b/net/ipv6/route.c
104885@@ -2967,7 +2967,7 @@ struct ctl_table ipv6_route_table_template[] = {
104886
104887 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104888 {
104889- struct ctl_table *table;
104890+ ctl_table_no_const *table;
104891
104892 table = kmemdup(ipv6_route_table_template,
104893 sizeof(ipv6_route_table_template),
104894diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104895index 6163f85..0070823 100644
104896--- a/net/ipv6/sit.c
104897+++ b/net/ipv6/sit.c
104898@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104899 static void ipip6_dev_free(struct net_device *dev);
104900 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104901 __be32 *v4dst);
104902-static struct rtnl_link_ops sit_link_ops __read_mostly;
104903+static struct rtnl_link_ops sit_link_ops;
104904
104905 static int sit_net_id __read_mostly;
104906 struct sit_net {
104907@@ -485,11 +485,11 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
104908 */
104909 static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104910 {
104911- const struct iphdr *iph = (const struct iphdr *) skb->data;
104912+ int ihl = ((const struct iphdr *)skb->data)->ihl*4;
104913 struct rt6_info *rt;
104914 struct sk_buff *skb2;
104915
104916- if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
104917+ if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8))
104918 return 1;
104919
104920 skb2 = skb_clone(skb, GFP_ATOMIC);
104921@@ -498,7 +498,7 @@ static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104922 return 1;
104923
104924 skb_dst_drop(skb2);
104925- skb_pull(skb2, iph->ihl * 4);
104926+ skb_pull(skb2, ihl);
104927 skb_reset_network_header(skb2);
104928
104929 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
104930@@ -1662,7 +1662,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104931 unregister_netdevice_queue(dev, head);
104932 }
104933
104934-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104935+static struct rtnl_link_ops sit_link_ops = {
104936 .kind = "sit",
104937 .maxtype = IFLA_IPTUN_MAX,
104938 .policy = ipip6_policy,
104939diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104940index 0c56c93..ece50df 100644
104941--- a/net/ipv6/sysctl_net_ipv6.c
104942+++ b/net/ipv6/sysctl_net_ipv6.c
104943@@ -68,7 +68,7 @@ static struct ctl_table ipv6_rotable[] = {
104944
104945 static int __net_init ipv6_sysctl_net_init(struct net *net)
104946 {
104947- struct ctl_table *ipv6_table;
104948+ ctl_table_no_const *ipv6_table;
104949 struct ctl_table *ipv6_route_table;
104950 struct ctl_table *ipv6_icmp_table;
104951 int err;
104952diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104953index 29964c3..b8caecf 100644
104954--- a/net/ipv6/tcp_ipv6.c
104955+++ b/net/ipv6/tcp_ipv6.c
104956@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104957 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104958 }
104959
104960+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104961+extern int grsec_enable_blackhole;
104962+#endif
104963+
104964 static void tcp_v6_hash(struct sock *sk)
104965 {
104966 if (sk->sk_state != TCP_CLOSE) {
104967@@ -1333,6 +1337,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104968 return 0;
104969
104970 reset:
104971+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104972+ if (!grsec_enable_blackhole)
104973+#endif
104974 tcp_v6_send_reset(sk, skb);
104975 discard:
104976 if (opt_skb)
104977@@ -1417,12 +1424,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104978 TCP_SKB_CB(skb)->sacked = 0;
104979
104980 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104981- if (!sk)
104982+ if (!sk) {
104983+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104984+ ret = 1;
104985+#endif
104986 goto no_tcp_socket;
104987+ }
104988
104989 process:
104990- if (sk->sk_state == TCP_TIME_WAIT)
104991+ if (sk->sk_state == TCP_TIME_WAIT) {
104992+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104993+ ret = 2;
104994+#endif
104995 goto do_time_wait;
104996+ }
104997
104998 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104999 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
105000@@ -1479,6 +1494,10 @@ csum_error:
105001 bad_packet:
105002 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
105003 } else {
105004+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105005+ if (!grsec_enable_blackhole || (ret == 1 &&
105006+ (skb->dev->flags & IFF_LOOPBACK)))
105007+#endif
105008 tcp_v6_send_reset(NULL, skb);
105009 }
105010
105011diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
105012index 4836af8..0e52bbd 100644
105013--- a/net/ipv6/udp.c
105014+++ b/net/ipv6/udp.c
105015@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
105016 udp_ipv6_hash_secret + net_hash_mix(net));
105017 }
105018
105019+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105020+extern int grsec_enable_blackhole;
105021+#endif
105022+
105023 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
105024 {
105025 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
105026@@ -434,7 +438,7 @@ try_again:
105027 if (unlikely(err)) {
105028 trace_kfree_skb(skb, udpv6_recvmsg);
105029 if (!peeked) {
105030- atomic_inc(&sk->sk_drops);
105031+ atomic_inc_unchecked(&sk->sk_drops);
105032 if (is_udp4)
105033 UDP_INC_STATS_USER(sock_net(sk),
105034 UDP_MIB_INERRORS,
105035@@ -701,7 +705,7 @@ csum_error:
105036 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
105037 drop:
105038 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
105039- atomic_inc(&sk->sk_drops);
105040+ atomic_inc_unchecked(&sk->sk_drops);
105041 kfree_skb(skb);
105042 return -1;
105043 }
105044@@ -740,7 +744,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
105045 if (likely(skb1 == NULL))
105046 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
105047 if (!skb1) {
105048- atomic_inc(&sk->sk_drops);
105049+ atomic_inc_unchecked(&sk->sk_drops);
105050 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
105051 IS_UDPLITE(sk));
105052 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
105053@@ -915,6 +919,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
105054 goto csum_error;
105055
105056 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
105057+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
105058+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
105059+#endif
105060 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
105061
105062 kfree_skb(skb);
105063diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
105064index 2a0bbda..442240d 100644
105065--- a/net/ipv6/xfrm6_policy.c
105066+++ b/net/ipv6/xfrm6_policy.c
105067@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105068 case IPPROTO_DCCP:
105069 if (!onlyproto && (nh + offset + 4 < skb->data ||
105070 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
105071- __be16 *ports = (__be16 *)exthdr;
105072+ __be16 *ports;
105073
105074+ nh = skb_network_header(skb);
105075+ ports = (__be16 *)(nh + offset);
105076 fl6->fl6_sport = ports[!!reverse];
105077 fl6->fl6_dport = ports[!reverse];
105078 }
105079@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105080
105081 case IPPROTO_ICMPV6:
105082 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
105083- u8 *icmp = (u8 *)exthdr;
105084+ u8 *icmp;
105085
105086+ nh = skb_network_header(skb);
105087+ icmp = (u8 *)(nh + offset);
105088 fl6->fl6_icmp_type = icmp[0];
105089 fl6->fl6_icmp_code = icmp[1];
105090 }
105091@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105092 case IPPROTO_MH:
105093 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
105094 struct ip6_mh *mh;
105095- mh = (struct ip6_mh *)exthdr;
105096
105097+ nh = skb_network_header(skb);
105098+ mh = (struct ip6_mh *)(nh + offset);
105099 fl6->fl6_mh_type = mh->ip6mh_type;
105100 }
105101 fl6->flowi6_proto = nexthdr;
105102@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
105103 }
105104 }
105105
105106-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
105107+static int xfrm6_garbage_collect(struct dst_ops *ops)
105108 {
105109 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
105110
105111- xfrm6_policy_afinfo.garbage_collect(net);
105112+ xfrm_garbage_collect_deferred(net);
105113 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
105114 }
105115
105116@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
105117
105118 static int __net_init xfrm6_net_init(struct net *net)
105119 {
105120- struct ctl_table *table;
105121+ ctl_table_no_const *table = NULL;
105122 struct ctl_table_header *hdr;
105123
105124- table = xfrm6_policy_table;
105125 if (!net_eq(net, &init_net)) {
105126- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
105127+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
105128 if (!table)
105129 goto err_alloc;
105130
105131 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
105132- }
105133+ hdr = register_net_sysctl(net, "net/ipv6", table);
105134+ } else
105135+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
105136
105137- hdr = register_net_sysctl(net, "net/ipv6", table);
105138 if (!hdr)
105139 goto err_reg;
105140
105141@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
105142 return 0;
105143
105144 err_reg:
105145- if (!net_eq(net, &init_net))
105146- kfree(table);
105147+ kfree(table);
105148 err_alloc:
105149 return -ENOMEM;
105150 }
105151diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
105152index e15c16a..7cf07aa 100644
105153--- a/net/ipx/ipx_proc.c
105154+++ b/net/ipx/ipx_proc.c
105155@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
105156 struct proc_dir_entry *p;
105157 int rc = -ENOMEM;
105158
105159- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
105160+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
105161
105162 if (!ipx_proc_dir)
105163 goto out;
105164diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
105165index 61ceb4c..e788eb8 100644
105166--- a/net/irda/ircomm/ircomm_tty.c
105167+++ b/net/irda/ircomm/ircomm_tty.c
105168@@ -317,10 +317,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105169 add_wait_queue(&port->open_wait, &wait);
105170
105171 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
105172- __FILE__, __LINE__, tty->driver->name, port->count);
105173+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105174
105175 spin_lock_irqsave(&port->lock, flags);
105176- port->count--;
105177+ atomic_dec(&port->count);
105178 port->blocked_open++;
105179 spin_unlock_irqrestore(&port->lock, flags);
105180
105181@@ -355,7 +355,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105182 }
105183
105184 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
105185- __FILE__, __LINE__, tty->driver->name, port->count);
105186+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105187
105188 schedule();
105189 }
105190@@ -365,12 +365,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
105191
105192 spin_lock_irqsave(&port->lock, flags);
105193 if (!tty_hung_up_p(filp))
105194- port->count++;
105195+ atomic_inc(&port->count);
105196 port->blocked_open--;
105197 spin_unlock_irqrestore(&port->lock, flags);
105198
105199 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
105200- __FILE__, __LINE__, tty->driver->name, port->count);
105201+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
105202
105203 if (!retval)
105204 port->flags |= ASYNC_NORMAL_ACTIVE;
105205@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
105206
105207 /* ++ is not atomic, so this should be protected - Jean II */
105208 spin_lock_irqsave(&self->port.lock, flags);
105209- self->port.count++;
105210+ atomic_inc(&self->port.count);
105211 spin_unlock_irqrestore(&self->port.lock, flags);
105212 tty_port_tty_set(&self->port, tty);
105213
105214 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
105215- self->line, self->port.count);
105216+ self->line, atomic_read(&self->port.count));
105217
105218 /* Not really used by us, but lets do it anyway */
105219 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
105220@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
105221 tty_kref_put(port->tty);
105222 }
105223 port->tty = NULL;
105224- port->count = 0;
105225+ atomic_set(&port->count, 0);
105226 spin_unlock_irqrestore(&port->lock, flags);
105227
105228 wake_up_interruptible(&port->open_wait);
105229@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
105230 seq_putc(m, '\n');
105231
105232 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
105233- seq_printf(m, "Open count: %d\n", self->port.count);
105234+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
105235 seq_printf(m, "Max data size: %d\n", self->max_data_size);
105236 seq_printf(m, "Max header size: %d\n", self->max_header_size);
105237
105238diff --git a/net/irda/irproc.c b/net/irda/irproc.c
105239index b9ac598..f88cc56 100644
105240--- a/net/irda/irproc.c
105241+++ b/net/irda/irproc.c
105242@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
105243 {
105244 int i;
105245
105246- proc_irda = proc_mkdir("irda", init_net.proc_net);
105247+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
105248 if (proc_irda == NULL)
105249 return;
105250
105251diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
105252index a089b6b..3ca3b60 100644
105253--- a/net/iucv/af_iucv.c
105254+++ b/net/iucv/af_iucv.c
105255@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
105256 {
105257 char name[12];
105258
105259- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
105260+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105261 while (__iucv_get_sock_by_name(name)) {
105262 sprintf(name, "%08x",
105263- atomic_inc_return(&iucv_sk_list.autobind_name));
105264+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
105265 }
105266 memcpy(iucv->src_name, name, 8);
105267 }
105268diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
105269index da78793..bdd78cf 100644
105270--- a/net/iucv/iucv.c
105271+++ b/net/iucv/iucv.c
105272@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
105273 return NOTIFY_OK;
105274 }
105275
105276-static struct notifier_block __refdata iucv_cpu_notifier = {
105277+static struct notifier_block iucv_cpu_notifier = {
105278 .notifier_call = iucv_cpu_notify,
105279 };
105280
105281diff --git a/net/key/af_key.c b/net/key/af_key.c
105282index 1847ec4..26ef732 100644
105283--- a/net/key/af_key.c
105284+++ b/net/key/af_key.c
105285@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
105286 static u32 get_acqseq(void)
105287 {
105288 u32 res;
105289- static atomic_t acqseq;
105290+ static atomic_unchecked_t acqseq;
105291
105292 do {
105293- res = atomic_inc_return(&acqseq);
105294+ res = atomic_inc_return_unchecked(&acqseq);
105295 } while (!res);
105296 return res;
105297 }
105298diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
105299index edb78e6..8dc654a 100644
105300--- a/net/l2tp/l2tp_eth.c
105301+++ b/net/l2tp/l2tp_eth.c
105302@@ -42,12 +42,12 @@ struct l2tp_eth {
105303 struct sock *tunnel_sock;
105304 struct l2tp_session *session;
105305 struct list_head list;
105306- atomic_long_t tx_bytes;
105307- atomic_long_t tx_packets;
105308- atomic_long_t tx_dropped;
105309- atomic_long_t rx_bytes;
105310- atomic_long_t rx_packets;
105311- atomic_long_t rx_errors;
105312+ atomic_long_unchecked_t tx_bytes;
105313+ atomic_long_unchecked_t tx_packets;
105314+ atomic_long_unchecked_t tx_dropped;
105315+ atomic_long_unchecked_t rx_bytes;
105316+ atomic_long_unchecked_t rx_packets;
105317+ atomic_long_unchecked_t rx_errors;
105318 };
105319
105320 /* via l2tp_session_priv() */
105321@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
105322 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
105323
105324 if (likely(ret == NET_XMIT_SUCCESS)) {
105325- atomic_long_add(len, &priv->tx_bytes);
105326- atomic_long_inc(&priv->tx_packets);
105327+ atomic_long_add_unchecked(len, &priv->tx_bytes);
105328+ atomic_long_inc_unchecked(&priv->tx_packets);
105329 } else {
105330- atomic_long_inc(&priv->tx_dropped);
105331+ atomic_long_inc_unchecked(&priv->tx_dropped);
105332 }
105333 return NETDEV_TX_OK;
105334 }
105335@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
105336 {
105337 struct l2tp_eth *priv = netdev_priv(dev);
105338
105339- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
105340- stats->tx_packets = atomic_long_read(&priv->tx_packets);
105341- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
105342- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
105343- stats->rx_packets = atomic_long_read(&priv->rx_packets);
105344- stats->rx_errors = atomic_long_read(&priv->rx_errors);
105345+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
105346+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
105347+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
105348+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
105349+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
105350+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
105351 return stats;
105352 }
105353
105354@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
105355 nf_reset(skb);
105356
105357 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
105358- atomic_long_inc(&priv->rx_packets);
105359- atomic_long_add(data_len, &priv->rx_bytes);
105360+ atomic_long_inc_unchecked(&priv->rx_packets);
105361+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
105362 } else {
105363- atomic_long_inc(&priv->rx_errors);
105364+ atomic_long_inc_unchecked(&priv->rx_errors);
105365 }
105366 return;
105367
105368 error:
105369- atomic_long_inc(&priv->rx_errors);
105370+ atomic_long_inc_unchecked(&priv->rx_errors);
105371 kfree_skb(skb);
105372 }
105373
105374diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
105375index 1a3c7e0..80f8b0c 100644
105376--- a/net/llc/llc_proc.c
105377+++ b/net/llc/llc_proc.c
105378@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
105379 int rc = -ENOMEM;
105380 struct proc_dir_entry *p;
105381
105382- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
105383+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
105384 if (!llc_proc_dir)
105385 goto out;
105386
105387diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
105388index 927b4ea..88a30e2 100644
105389--- a/net/mac80211/cfg.c
105390+++ b/net/mac80211/cfg.c
105391@@ -540,7 +540,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
105392 ret = ieee80211_vif_use_channel(sdata, chandef,
105393 IEEE80211_CHANCTX_EXCLUSIVE);
105394 }
105395- } else if (local->open_count == local->monitors) {
105396+ } else if (local_read(&local->open_count) == local->monitors) {
105397 local->_oper_chandef = *chandef;
105398 ieee80211_hw_config(local, 0);
105399 }
105400@@ -3286,7 +3286,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
105401 else
105402 local->probe_req_reg--;
105403
105404- if (!local->open_count)
105405+ if (!local_read(&local->open_count))
105406 break;
105407
105408 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
105409@@ -3420,8 +3420,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
105410 if (chanctx_conf) {
105411 *chandef = chanctx_conf->def;
105412 ret = 0;
105413- } else if (local->open_count > 0 &&
105414- local->open_count == local->monitors &&
105415+ } else if (local_read(&local->open_count) > 0 &&
105416+ local_read(&local->open_count) == local->monitors &&
105417 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
105418 if (local->use_chanctx)
105419 *chandef = local->monitor_chandef;
105420diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
105421index ef7a089..fe1caf7 100644
105422--- a/net/mac80211/ieee80211_i.h
105423+++ b/net/mac80211/ieee80211_i.h
105424@@ -28,6 +28,7 @@
105425 #include <net/ieee80211_radiotap.h>
105426 #include <net/cfg80211.h>
105427 #include <net/mac80211.h>
105428+#include <asm/local.h>
105429 #include "key.h"
105430 #include "sta_info.h"
105431 #include "debug.h"
105432@@ -1055,7 +1056,7 @@ struct ieee80211_local {
105433 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
105434 spinlock_t queue_stop_reason_lock;
105435
105436- int open_count;
105437+ local_t open_count;
105438 int monitors, cooked_mntrs;
105439 /* number of interfaces with corresponding FIF_ flags */
105440 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
105441diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
105442index f75e5f1..3d9ad4f 100644
105443--- a/net/mac80211/iface.c
105444+++ b/net/mac80211/iface.c
105445@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105446 break;
105447 }
105448
105449- if (local->open_count == 0) {
105450+ if (local_read(&local->open_count) == 0) {
105451 res = drv_start(local);
105452 if (res)
105453 goto err_del_bss;
105454@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105455 res = drv_add_interface(local, sdata);
105456 if (res)
105457 goto err_stop;
105458- } else if (local->monitors == 0 && local->open_count == 0) {
105459+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
105460 res = ieee80211_add_virtual_monitor(local);
105461 if (res)
105462 goto err_stop;
105463@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105464 atomic_inc(&local->iff_promiscs);
105465
105466 if (coming_up)
105467- local->open_count++;
105468+ local_inc(&local->open_count);
105469
105470 if (hw_reconf_flags)
105471 ieee80211_hw_config(local, hw_reconf_flags);
105472@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
105473 err_del_interface:
105474 drv_remove_interface(local, sdata);
105475 err_stop:
105476- if (!local->open_count)
105477+ if (!local_read(&local->open_count))
105478 drv_stop(local);
105479 err_del_bss:
105480 sdata->bss = NULL;
105481@@ -889,7 +889,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105482 }
105483
105484 if (going_down)
105485- local->open_count--;
105486+ local_dec(&local->open_count);
105487
105488 switch (sdata->vif.type) {
105489 case NL80211_IFTYPE_AP_VLAN:
105490@@ -950,7 +950,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105491 }
105492 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
105493
105494- if (local->open_count == 0)
105495+ if (local_read(&local->open_count) == 0)
105496 ieee80211_clear_tx_pending(local);
105497
105498 /*
105499@@ -990,7 +990,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105500
105501 ieee80211_recalc_ps(local, -1);
105502
105503- if (local->open_count == 0) {
105504+ if (local_read(&local->open_count) == 0) {
105505 ieee80211_stop_device(local);
105506
105507 /* no reconfiguring after stop! */
105508@@ -1001,7 +1001,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
105509 ieee80211_configure_filter(local);
105510 ieee80211_hw_config(local, hw_reconf_flags);
105511
105512- if (local->monitors == local->open_count)
105513+ if (local->monitors == local_read(&local->open_count))
105514 ieee80211_add_virtual_monitor(local);
105515 }
105516
105517diff --git a/net/mac80211/main.c b/net/mac80211/main.c
105518index e0ab432..36b7b94 100644
105519--- a/net/mac80211/main.c
105520+++ b/net/mac80211/main.c
105521@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
105522 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
105523 IEEE80211_CONF_CHANGE_POWER);
105524
105525- if (changed && local->open_count) {
105526+ if (changed && local_read(&local->open_count)) {
105527 ret = drv_config(local, changed);
105528 /*
105529 * Goal:
105530diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
105531index 4c5192e..04cc0d8 100644
105532--- a/net/mac80211/pm.c
105533+++ b/net/mac80211/pm.c
105534@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105535 struct ieee80211_sub_if_data *sdata;
105536 struct sta_info *sta;
105537
105538- if (!local->open_count)
105539+ if (!local_read(&local->open_count))
105540 goto suspend;
105541
105542 ieee80211_scan_cancel(local);
105543@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105544 cancel_work_sync(&local->dynamic_ps_enable_work);
105545 del_timer_sync(&local->dynamic_ps_timer);
105546
105547- local->wowlan = wowlan && local->open_count;
105548+ local->wowlan = wowlan && local_read(&local->open_count);
105549 if (local->wowlan) {
105550 int err = drv_suspend(local, wowlan);
105551 if (err < 0) {
105552@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
105553 WARN_ON(!list_empty(&local->chanctx_list));
105554
105555 /* stop hardware - this must stop RX */
105556- if (local->open_count)
105557+ if (local_read(&local->open_count))
105558 ieee80211_stop_device(local);
105559
105560 suspend:
105561diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
105562index 8fdadfd..a4f72b8 100644
105563--- a/net/mac80211/rate.c
105564+++ b/net/mac80211/rate.c
105565@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
105566
105567 ASSERT_RTNL();
105568
105569- if (local->open_count)
105570+ if (local_read(&local->open_count))
105571 return -EBUSY;
105572
105573 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
105574diff --git a/net/mac80211/util.c b/net/mac80211/util.c
105575index 725af7a..a21a20a 100644
105576--- a/net/mac80211/util.c
105577+++ b/net/mac80211/util.c
105578@@ -1643,7 +1643,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105579 }
105580 #endif
105581 /* everything else happens only if HW was up & running */
105582- if (!local->open_count)
105583+ if (!local_read(&local->open_count))
105584 goto wake_up;
105585
105586 /*
105587@@ -1869,7 +1869,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105588 local->in_reconfig = false;
105589 barrier();
105590
105591- if (local->monitors == local->open_count && local->monitors > 0)
105592+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
105593 ieee80211_add_virtual_monitor(local);
105594
105595 /*
105596diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
105597index 6d77cce..36e2fc3 100644
105598--- a/net/netfilter/Kconfig
105599+++ b/net/netfilter/Kconfig
105600@@ -1096,6 +1096,16 @@ config NETFILTER_XT_MATCH_ESP
105601
105602 To compile it as a module, choose M here. If unsure, say N.
105603
105604+config NETFILTER_XT_MATCH_GRADM
105605+ tristate '"gradm" match support'
105606+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
105607+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
105608+ ---help---
105609+ The gradm match allows to match on grsecurity RBAC being enabled.
105610+ It is useful when iptables rules are applied early on bootup to
105611+ prevent connections to the machine (except from a trusted host)
105612+ while the RBAC system is disabled.
105613+
105614 config NETFILTER_XT_MATCH_HASHLIMIT
105615 tristate '"hashlimit" match support'
105616 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
105617diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
105618index fad5fdb..ba3672a 100644
105619--- a/net/netfilter/Makefile
105620+++ b/net/netfilter/Makefile
105621@@ -136,6 +136,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
105622 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
105623 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
105624 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
105625+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
105626 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
105627 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
105628 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
105629diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
105630index ec8114f..6b2bfba 100644
105631--- a/net/netfilter/ipset/ip_set_core.c
105632+++ b/net/netfilter/ipset/ip_set_core.c
105633@@ -1921,7 +1921,7 @@ done:
105634 return ret;
105635 }
105636
105637-static struct nf_sockopt_ops so_set __read_mostly = {
105638+static struct nf_sockopt_ops so_set = {
105639 .pf = PF_INET,
105640 .get_optmin = SO_IP_SET,
105641 .get_optmax = SO_IP_SET + 1,
105642diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
105643index 610e19c..08d0c3f 100644
105644--- a/net/netfilter/ipvs/ip_vs_conn.c
105645+++ b/net/netfilter/ipvs/ip_vs_conn.c
105646@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
105647 /* Increase the refcnt counter of the dest */
105648 ip_vs_dest_hold(dest);
105649
105650- conn_flags = atomic_read(&dest->conn_flags);
105651+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
105652 if (cp->protocol != IPPROTO_UDP)
105653 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
105654 flags = cp->flags;
105655@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
105656
105657 cp->control = NULL;
105658 atomic_set(&cp->n_control, 0);
105659- atomic_set(&cp->in_pkts, 0);
105660+ atomic_set_unchecked(&cp->in_pkts, 0);
105661
105662 cp->packet_xmit = NULL;
105663 cp->app = NULL;
105664@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
105665
105666 /* Don't drop the entry if its number of incoming packets is not
105667 located in [0, 8] */
105668- i = atomic_read(&cp->in_pkts);
105669+ i = atomic_read_unchecked(&cp->in_pkts);
105670 if (i > 8 || i < 0) return 0;
105671
105672 if (!todrop_rate[i]) return 0;
105673diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
105674index 5c34e8d..0d8eb7f 100644
105675--- a/net/netfilter/ipvs/ip_vs_core.c
105676+++ b/net/netfilter/ipvs/ip_vs_core.c
105677@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
105678 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
105679 /* do not touch skb anymore */
105680
105681- atomic_inc(&cp->in_pkts);
105682+ atomic_inc_unchecked(&cp->in_pkts);
105683 ip_vs_conn_put(cp);
105684 return ret;
105685 }
105686@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
105687 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
105688 pkts = sysctl_sync_threshold(ipvs);
105689 else
105690- pkts = atomic_add_return(1, &cp->in_pkts);
105691+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105692
105693 if (ipvs->sync_state & IP_VS_STATE_MASTER)
105694 ip_vs_sync_conn(net, cp, pkts);
105695diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
105696index fd3f444..ab28fa24 100644
105697--- a/net/netfilter/ipvs/ip_vs_ctl.c
105698+++ b/net/netfilter/ipvs/ip_vs_ctl.c
105699@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
105700 */
105701 ip_vs_rs_hash(ipvs, dest);
105702 }
105703- atomic_set(&dest->conn_flags, conn_flags);
105704+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
105705
105706 /* bind the service */
105707 old_svc = rcu_dereference_protected(dest->svc, 1);
105708@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
105709 * align with netns init in ip_vs_control_net_init()
105710 */
105711
105712-static struct ctl_table vs_vars[] = {
105713+static ctl_table_no_const vs_vars[] __read_only = {
105714 {
105715 .procname = "amemthresh",
105716 .maxlen = sizeof(int),
105717@@ -1989,7 +1989,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105718 " %-7s %-6d %-10d %-10d\n",
105719 &dest->addr.in6,
105720 ntohs(dest->port),
105721- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105722+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105723 atomic_read(&dest->weight),
105724 atomic_read(&dest->activeconns),
105725 atomic_read(&dest->inactconns));
105726@@ -2000,7 +2000,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105727 "%-7s %-6d %-10d %-10d\n",
105728 ntohl(dest->addr.ip),
105729 ntohs(dest->port),
105730- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105731+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105732 atomic_read(&dest->weight),
105733 atomic_read(&dest->activeconns),
105734 atomic_read(&dest->inactconns));
105735@@ -2471,7 +2471,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
105736
105737 entry.addr = dest->addr.ip;
105738 entry.port = dest->port;
105739- entry.conn_flags = atomic_read(&dest->conn_flags);
105740+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
105741 entry.weight = atomic_read(&dest->weight);
105742 entry.u_threshold = dest->u_threshold;
105743 entry.l_threshold = dest->l_threshold;
105744@@ -3010,7 +3010,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
105745 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
105746 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
105747 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
105748- (atomic_read(&dest->conn_flags) &
105749+ (atomic_read_unchecked(&dest->conn_flags) &
105750 IP_VS_CONN_F_FWD_MASK)) ||
105751 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
105752 atomic_read(&dest->weight)) ||
105753@@ -3600,7 +3600,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
105754 {
105755 int idx;
105756 struct netns_ipvs *ipvs = net_ipvs(net);
105757- struct ctl_table *tbl;
105758+ ctl_table_no_const *tbl;
105759
105760 atomic_set(&ipvs->dropentry, 0);
105761 spin_lock_init(&ipvs->dropentry_lock);
105762diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
105763index 547ff33..c8c8117 100644
105764--- a/net/netfilter/ipvs/ip_vs_lblc.c
105765+++ b/net/netfilter/ipvs/ip_vs_lblc.c
105766@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
105767 * IPVS LBLC sysctl table
105768 */
105769 #ifdef CONFIG_SYSCTL
105770-static struct ctl_table vs_vars_table[] = {
105771+static ctl_table_no_const vs_vars_table[] __read_only = {
105772 {
105773 .procname = "lblc_expiration",
105774 .data = NULL,
105775diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105776index 3f21a2f..a112e85 100644
105777--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105778+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105779@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105780 * IPVS LBLCR sysctl table
105781 */
105782
105783-static struct ctl_table vs_vars_table[] = {
105784+static ctl_table_no_const vs_vars_table[] __read_only = {
105785 {
105786 .procname = "lblcr_expiration",
105787 .data = NULL,
105788diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105789index eadffb2..c2feeae 100644
105790--- a/net/netfilter/ipvs/ip_vs_sync.c
105791+++ b/net/netfilter/ipvs/ip_vs_sync.c
105792@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105793 cp = cp->control;
105794 if (cp) {
105795 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105796- pkts = atomic_add_return(1, &cp->in_pkts);
105797+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105798 else
105799 pkts = sysctl_sync_threshold(ipvs);
105800 ip_vs_sync_conn(net, cp->control, pkts);
105801@@ -771,7 +771,7 @@ control:
105802 if (!cp)
105803 return;
105804 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105805- pkts = atomic_add_return(1, &cp->in_pkts);
105806+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105807 else
105808 pkts = sysctl_sync_threshold(ipvs);
105809 goto sloop;
105810@@ -894,7 +894,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105811
105812 if (opt)
105813 memcpy(&cp->in_seq, opt, sizeof(*opt));
105814- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105815+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105816 cp->state = state;
105817 cp->old_state = cp->state;
105818 /*
105819diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105820index 56896a4..dfe3806 100644
105821--- a/net/netfilter/ipvs/ip_vs_xmit.c
105822+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105823@@ -1114,7 +1114,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105824 else
105825 rc = NF_ACCEPT;
105826 /* do not touch skb anymore */
105827- atomic_inc(&cp->in_pkts);
105828+ atomic_inc_unchecked(&cp->in_pkts);
105829 goto out;
105830 }
105831
105832@@ -1206,7 +1206,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105833 else
105834 rc = NF_ACCEPT;
105835 /* do not touch skb anymore */
105836- atomic_inc(&cp->in_pkts);
105837+ atomic_inc_unchecked(&cp->in_pkts);
105838 goto out;
105839 }
105840
105841diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105842index a4b5e2a..13b1de3 100644
105843--- a/net/netfilter/nf_conntrack_acct.c
105844+++ b/net/netfilter/nf_conntrack_acct.c
105845@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105846 #ifdef CONFIG_SYSCTL
105847 static int nf_conntrack_acct_init_sysctl(struct net *net)
105848 {
105849- struct ctl_table *table;
105850+ ctl_table_no_const *table;
105851
105852 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105853 GFP_KERNEL);
105854diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105855index de88c4a..ec84234 100644
105856--- a/net/netfilter/nf_conntrack_core.c
105857+++ b/net/netfilter/nf_conntrack_core.c
105858@@ -1739,6 +1739,10 @@ void nf_conntrack_init_end(void)
105859 #define DYING_NULLS_VAL ((1<<30)+1)
105860 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105861
105862+#ifdef CONFIG_GRKERNSEC_HIDESYM
105863+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105864+#endif
105865+
105866 int nf_conntrack_init_net(struct net *net)
105867 {
105868 int ret = -ENOMEM;
105869@@ -1764,7 +1768,11 @@ int nf_conntrack_init_net(struct net *net)
105870 if (!net->ct.stat)
105871 goto err_pcpu_lists;
105872
105873+#ifdef CONFIG_GRKERNSEC_HIDESYM
105874+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105875+#else
105876 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105877+#endif
105878 if (!net->ct.slabname)
105879 goto err_slabname;
105880
105881diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105882index 4e78c57..ec8fb74 100644
105883--- a/net/netfilter/nf_conntrack_ecache.c
105884+++ b/net/netfilter/nf_conntrack_ecache.c
105885@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105886 #ifdef CONFIG_SYSCTL
105887 static int nf_conntrack_event_init_sysctl(struct net *net)
105888 {
105889- struct ctl_table *table;
105890+ ctl_table_no_const *table;
105891
105892 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105893 GFP_KERNEL);
105894diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105895index 5b3eae7..dd4b8fe 100644
105896--- a/net/netfilter/nf_conntrack_helper.c
105897+++ b/net/netfilter/nf_conntrack_helper.c
105898@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105899
105900 static int nf_conntrack_helper_init_sysctl(struct net *net)
105901 {
105902- struct ctl_table *table;
105903+ ctl_table_no_const *table;
105904
105905 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105906 GFP_KERNEL);
105907diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105908index b65d586..beec902 100644
105909--- a/net/netfilter/nf_conntrack_proto.c
105910+++ b/net/netfilter/nf_conntrack_proto.c
105911@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105912
105913 static void
105914 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105915- struct ctl_table **table,
105916+ ctl_table_no_const **table,
105917 unsigned int users)
105918 {
105919 if (users > 0)
105920diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105921index f641751..d3c5b51 100644
105922--- a/net/netfilter/nf_conntrack_standalone.c
105923+++ b/net/netfilter/nf_conntrack_standalone.c
105924@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105925
105926 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105927 {
105928- struct ctl_table *table;
105929+ ctl_table_no_const *table;
105930
105931 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105932 GFP_KERNEL);
105933diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105934index 7a394df..bd91a8a 100644
105935--- a/net/netfilter/nf_conntrack_timestamp.c
105936+++ b/net/netfilter/nf_conntrack_timestamp.c
105937@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105938 #ifdef CONFIG_SYSCTL
105939 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105940 {
105941- struct ctl_table *table;
105942+ ctl_table_no_const *table;
105943
105944 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105945 GFP_KERNEL);
105946diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105947index daad602..384be13 100644
105948--- a/net/netfilter/nf_log.c
105949+++ b/net/netfilter/nf_log.c
105950@@ -353,7 +353,7 @@ static const struct file_operations nflog_file_ops = {
105951
105952 #ifdef CONFIG_SYSCTL
105953 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105954-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105955+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105956
105957 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105958 void __user *buffer, size_t *lenp, loff_t *ppos)
105959@@ -384,14 +384,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105960 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105961 mutex_unlock(&nf_log_mutex);
105962 } else {
105963+ ctl_table_no_const nf_log_table = *table;
105964+
105965 mutex_lock(&nf_log_mutex);
105966 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
105967 lockdep_is_held(&nf_log_mutex));
105968 if (!logger)
105969- table->data = "NONE";
105970+ nf_log_table.data = "NONE";
105971 else
105972- table->data = logger->name;
105973- r = proc_dostring(table, write, buffer, lenp, ppos);
105974+ nf_log_table.data = logger->name;
105975+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105976 mutex_unlock(&nf_log_mutex);
105977 }
105978
105979diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105980index c68c1e5..8b5d670 100644
105981--- a/net/netfilter/nf_sockopt.c
105982+++ b/net/netfilter/nf_sockopt.c
105983@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105984 }
105985 }
105986
105987- list_add(&reg->list, &nf_sockopts);
105988+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105989 out:
105990 mutex_unlock(&nf_sockopt_mutex);
105991 return ret;
105992@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105993 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105994 {
105995 mutex_lock(&nf_sockopt_mutex);
105996- list_del(&reg->list);
105997+ pax_list_del((struct list_head *)&reg->list);
105998 mutex_unlock(&nf_sockopt_mutex);
105999 }
106000 EXPORT_SYMBOL(nf_unregister_sockopt);
106001diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
106002index a11c5ff..aa413a7 100644
106003--- a/net/netfilter/nfnetlink_log.c
106004+++ b/net/netfilter/nfnetlink_log.c
106005@@ -79,7 +79,7 @@ static int nfnl_log_net_id __read_mostly;
106006 struct nfnl_log_net {
106007 spinlock_t instances_lock;
106008 struct hlist_head instance_table[INSTANCE_BUCKETS];
106009- atomic_t global_seq;
106010+ atomic_unchecked_t global_seq;
106011 };
106012
106013 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
106014@@ -561,7 +561,7 @@ __build_packet_message(struct nfnl_log_net *log,
106015 /* global sequence number */
106016 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
106017 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
106018- htonl(atomic_inc_return(&log->global_seq))))
106019+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
106020 goto nla_put_failure;
106021
106022 if (data_len) {
106023diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
106024index 108120f..5b169db 100644
106025--- a/net/netfilter/nfnetlink_queue_core.c
106026+++ b/net/netfilter/nfnetlink_queue_core.c
106027@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
106028 * returned by nf_queue. For instance, callers rely on -ECANCELED to
106029 * mean 'ignore this hook'.
106030 */
106031- if (IS_ERR(segs))
106032+ if (IS_ERR_OR_NULL(segs))
106033 goto out_err;
106034 queued = 0;
106035 err = 0;
106036diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
106037index 1840989..6895744 100644
106038--- a/net/netfilter/nft_compat.c
106039+++ b/net/netfilter/nft_compat.c
106040@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
106041 /* We want to reuse existing compat_to_user */
106042 old_fs = get_fs();
106043 set_fs(KERNEL_DS);
106044- t->compat_to_user(out, in);
106045+ t->compat_to_user((void __force_user *)out, in);
106046 set_fs(old_fs);
106047 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
106048 kfree(out);
106049@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
106050 /* We want to reuse existing compat_to_user */
106051 old_fs = get_fs();
106052 set_fs(KERNEL_DS);
106053- m->compat_to_user(out, in);
106054+ m->compat_to_user((void __force_user *)out, in);
106055 set_fs(old_fs);
106056 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
106057 kfree(out);
106058diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
106059new file mode 100644
106060index 0000000..c566332
106061--- /dev/null
106062+++ b/net/netfilter/xt_gradm.c
106063@@ -0,0 +1,51 @@
106064+/*
106065+ * gradm match for netfilter
106066